Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--   kernel/sched/fair.c   113
1 file changed, 78 insertions, 35 deletions
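
This patch wraps the CFS schedstats bookkeeping in kernel/sched/fair.c behind a schedstat_enabled() check so the accounting is skipped unless schedstats are explicitly turned on. The helper itself is not defined in this file; for context, here is a minimal sketch of how the companion kernel/sched/stats.h change in the same series is assumed to define it (the exact definition may differ):

/* Sketch only -- assumed companion definition in kernel/sched/stats.h */
#ifdef CONFIG_SCHEDSTATS

extern struct static_key_false sched_schedstats;

/*
 * A static branch: when the key is disabled (the default), the check
 * compiles down to a patched-out jump, so the stats paths gated below
 * cost essentially nothing.
 */
#define schedstat_enabled()     static_branch_unlikely(&sched_schedstats)

#else /* !CONFIG_SCHEDSTATS */

#define schedstat_enabled()     0

#endif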
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 56b7d4b83947..51a45502d8a6 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -20,8 +20,8 @@
  * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
  */
 
-#include <linux/latencytop.h>
 #include <linux/sched.h>
+#include <linux/latencytop.h>
 #include <linux/cpumask.h>
 #include <linux/cpuidle.h>
 #include <linux/slab.h>
@@ -755,7 +755,9 @@ static void
 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
        struct task_struct *p;
-       u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start;
+       u64 delta;
+
+       delta = rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start;
 
        if (entity_is_task(se)) {
                p = task_of(se);
@@ -776,22 +778,12 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
        se->statistics.wait_sum += delta;
        se->statistics.wait_start = 0;
 }
-#else
-static inline void
-update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-}
-
-static inline void
-update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-}
-#endif
 
 /*
  * Task is being enqueued - update stats:
  */
-static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static inline void
+update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
        /*
         * Are we enqueueing a waiting task? (for current tasks
@@ -802,7 +794,7 @@ static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 }
 
 static inline void
-update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
+update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
        /*
         * Mark the end of the wait period if dequeueing a
@@ -810,7 +802,40 @@ update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
         */
        if (se != cfs_rq->curr)
                update_stats_wait_end(cfs_rq, se);
+
+       if (flags & DEQUEUE_SLEEP) {
+               if (entity_is_task(se)) {
+                       struct task_struct *tsk = task_of(se);
+
+                       if (tsk->state & TASK_INTERRUPTIBLE)
+                               se->statistics.sleep_start = rq_clock(rq_of(cfs_rq));
+                       if (tsk->state & TASK_UNINTERRUPTIBLE)
+                               se->statistics.block_start = rq_clock(rq_of(cfs_rq));
+               }
+       }
+
+}
+#else
+static inline void
+update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+}
+
+static inline void
+update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+}
+
+static inline void
+update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+}
+
+static inline void
+update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+{
 }
+#endif
 
 /*
  * We are picking a new current task - update its stats:
@@ -3102,6 +3127,26 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 
 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
 
+static inline void check_schedstat_required(void)
+{
+#ifdef CONFIG_SCHEDSTATS
+       if (schedstat_enabled())
+               return;
+
+       /* Force schedstat enabled if a dependent tracepoint is active */
+       if (trace_sched_stat_wait_enabled() ||
+                       trace_sched_stat_sleep_enabled() ||
+                       trace_sched_stat_iowait_enabled() ||
+                       trace_sched_stat_blocked_enabled() ||
+                       trace_sched_stat_runtime_enabled()) {
+               pr_warn_once("Scheduler tracepoints stat_sleep, stat_iowait, "
+                            "stat_blocked and stat_runtime require the "
+                            "kernel parameter schedstats=enabled or "
+                            "kernel.sched_schedstats=1\n");
+       }
+#endif
+}
+
 static void
 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
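
check_schedstat_required(), added in the hunk above, relies on the trace_<name>_enabled() accessors that the tracepoint machinery generates for every declared tracepoint; they simply report whether any probe is currently attached. Roughly, as a sketch of the generic pattern from include/linux/tracepoint.h (not code from this patch, details may differ by kernel version):

/*
 * For a tracepoint declared as TRACE_EVENT(sched_stat_wait, ...),
 * __DECLARE_TRACE() also emits an accessor along these lines:
 */
static inline bool trace_sched_stat_wait_enabled(void)
{
        return static_key_false(&__tracepoint_sched_stat_wait.key);
}

With that, the warning fires once if one of the sched_stat_* tracepoints is active while schedstats is off, pointing at schedstats=enabled / kernel.sched_schedstats=1 as spelled out in the message.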
@@ -3122,11 +3167,15 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 
        if (flags & ENQUEUE_WAKEUP) {
                place_entity(cfs_rq, se, 0);
-               enqueue_sleeper(cfs_rq, se);
+               if (schedstat_enabled())
+                       enqueue_sleeper(cfs_rq, se);
        }
 
-       update_stats_enqueue(cfs_rq, se);
-       check_spread(cfs_rq, se);
+       check_schedstat_required();
+       if (schedstat_enabled()) {
+               update_stats_enqueue(cfs_rq, se);
+               check_spread(cfs_rq, se);
+       }
        if (se != cfs_rq->curr)
                __enqueue_entity(cfs_rq, se);
        se->on_rq = 1;
@@ -3193,19 +3242,8 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
        update_curr(cfs_rq);
        dequeue_entity_load_avg(cfs_rq, se);
 
-       update_stats_dequeue(cfs_rq, se);
-       if (flags & DEQUEUE_SLEEP) {
-#ifdef CONFIG_SCHEDSTATS
-               if (entity_is_task(se)) {
-                       struct task_struct *tsk = task_of(se);
-
-                       if (tsk->state & TASK_INTERRUPTIBLE)
-                               se->statistics.sleep_start = rq_clock(rq_of(cfs_rq));
-                       if (tsk->state & TASK_UNINTERRUPTIBLE)
-                               se->statistics.block_start = rq_clock(rq_of(cfs_rq));
-               }
-#endif
-       }
+       if (schedstat_enabled())
+               update_stats_dequeue(cfs_rq, se, flags);
 
        clear_buddies(cfs_rq, se);
 
@@ -3279,7 +3317,8 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
         * a CPU. So account for the time it spent waiting on the
         * runqueue.
         */
-       update_stats_wait_end(cfs_rq, se);
+       if (schedstat_enabled())
+               update_stats_wait_end(cfs_rq, se);
        __dequeue_entity(cfs_rq, se);
        update_load_avg(se, 1);
 }
@@ -3292,7 +3331,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
         * least twice that of our own weight (i.e. dont track it
         * when there are only lesser-weight tasks around):
         */
-       if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
+       if (schedstat_enabled() && rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
                se->statistics.slice_max = max(se->statistics.slice_max,
                        se->sum_exec_runtime - se->prev_sum_exec_runtime);
        }
@@ -3375,9 +3414,13 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
        /* throttle cfs_rqs exceeding runtime */
        check_cfs_rq_runtime(cfs_rq);
 
-       check_spread(cfs_rq, prev);
+       if (schedstat_enabled()) {
+               check_spread(cfs_rq, prev);
+               if (prev->on_rq)
+                       update_stats_wait_start(cfs_rq, prev);
+       }
+
        if (prev->on_rq) {
-               update_stats_wait_start(cfs_rq, prev);
                /* Put 'current' back into the tree. */
                __enqueue_entity(cfs_rq, prev);
                /* in !on_rq case, update occurred at dequeue */
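
For completeness, the runtime switch that all of the new schedstat_enabled() checks depend on lives outside this file, in kernel/sched/core.c in the same series. A simplified sketch of the assumed shape of that code (names and details may differ from the actual series):

/* Sketch only: the static key the schedstat_enabled() checks test */
DEFINE_STATIC_KEY_FALSE(sched_schedstats);

static void set_schedstats(bool enabled)
{
        if (enabled)
                static_branch_enable(&sched_schedstats);
        else
                static_branch_disable(&sched_schedstats);
}

With the key disabled by default, the wait/sleep/block accounting, check_spread() and slice_max tracking gated above are all skipped until schedstats is opted into via the schedstats= kernel parameter or the kernel.sched_schedstats sysctl referenced in the warning message.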