diff options
Diffstat (limited to 'kernel/sched_stats.h')
| -rw-r--r-- | kernel/sched_stats.h | 42 |
1 file changed, 33 insertions, 9 deletions
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h index 80179ef7450e..8385d43987e2 100644 --- a/kernel/sched_stats.h +++ b/kernel/sched_stats.h | |||
| @@ -118,6 +118,13 @@ rq_sched_info_depart(struct rq *rq, unsigned long long delta) | |||
| 118 | if (rq) | 118 | if (rq) |
| 119 | rq->rq_sched_info.cpu_time += delta; | 119 | rq->rq_sched_info.cpu_time += delta; |
| 120 | } | 120 | } |
| 121 | |||
| 122 | static inline void | ||
| 123 | rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) | ||
| 124 | { | ||
| 125 | if (rq) | ||
| 126 | rq->rq_sched_info.run_delay += delta; | ||
| 127 | } | ||
| 121 | # define schedstat_inc(rq, field) do { (rq)->field++; } while (0) | 128 | # define schedstat_inc(rq, field) do { (rq)->field++; } while (0) |
| 122 | # define schedstat_add(rq, field, amt) do { (rq)->field += (amt); } while (0) | 129 | # define schedstat_add(rq, field, amt) do { (rq)->field += (amt); } while (0) |
| 123 | # define schedstat_set(var, val) do { var = (val); } while (0) | 130 | # define schedstat_set(var, val) do { var = (val); } while (0) |
| @@ -126,6 +133,9 @@ static inline void | |||
| 126 | rq_sched_info_arrive(struct rq *rq, unsigned long long delta) | 133 | rq_sched_info_arrive(struct rq *rq, unsigned long long delta) |
| 127 | {} | 134 | {} |
| 128 | static inline void | 135 | static inline void |
| 136 | rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) | ||
| 137 | {} | ||
| 138 | static inline void | ||
| 129 | rq_sched_info_depart(struct rq *rq, unsigned long long delta) | 139 | rq_sched_info_depart(struct rq *rq, unsigned long long delta) |
| 130 | {} | 140 | {} |
| 131 | # define schedstat_inc(rq, field) do { } while (0) | 141 | # define schedstat_inc(rq, field) do { } while (0) |
| @@ -134,6 +144,11 @@ rq_sched_info_depart(struct rq *rq, unsigned long long delta) | |||
| 134 | #endif | 144 | #endif |
| 135 | 145 | ||
| 136 | #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) | 146 | #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) |
| 147 | static inline void sched_info_reset_dequeued(struct task_struct *t) | ||
| 148 | { | ||
| 149 | t->sched_info.last_queued = 0; | ||
| 150 | } | ||
| 151 | |||
| 137 | /* | 152 | /* |
| 138 | * Called when a process is dequeued from the active array and given | 153 | * Called when a process is dequeued from the active array and given |
| 139 | * the cpu. We should note that with the exception of interactive | 154 | * the cpu. We should note that with the exception of interactive |
| @@ -143,15 +158,22 @@ rq_sched_info_depart(struct rq *rq, unsigned long long delta) | |||
| 143 | * active queue, thus delaying tasks in the expired queue from running; | 158 | * active queue, thus delaying tasks in the expired queue from running; |
| 144 | * see scheduler_tick()). | 159 | * see scheduler_tick()). |
| 145 | * | 160 | * |
| 146 | * This function is only called from sched_info_arrive(), rather than | 161 | * Though we are interested in knowing how long it was from the *first* time a |
| 147 | * dequeue_task(). Even though a task may be queued and dequeued multiple | 162 | * task was queued to the time that it finally hit a cpu, we call this routine |
| 148 | * times as it is shuffled about, we're really interested in knowing how | 163 | * from dequeue_task() to account for possible rq->clock skew across cpus. The |
| 149 | * long it was from the *first* time it was queued to the time that it | 164 | * delta taken on each cpu would annul the skew. |
| 150 | * finally hit a cpu. | ||
| 151 | */ | 165 | */ |
| 152 | static inline void sched_info_dequeued(struct task_struct *t) | 166 | static inline void sched_info_dequeued(struct task_struct *t) |
| 153 | { | 167 | { |
| 154 | t->sched_info.last_queued = 0; | 168 | unsigned long long now = task_rq(t)->clock, delta = 0; |
| 169 | |||
| 170 | if (unlikely(sched_info_on())) | ||
| 171 | if (t->sched_info.last_queued) | ||
| 172 | delta = now - t->sched_info.last_queued; | ||
| 173 | sched_info_reset_dequeued(t); | ||
| 174 | t->sched_info.run_delay += delta; | ||
| 175 | |||
| 176 | rq_sched_info_dequeued(task_rq(t), delta); | ||
| 155 | } | 177 | } |
| 156 | 178 | ||
| 157 | /* | 179 | /* |
| @@ -165,7 +187,7 @@ static void sched_info_arrive(struct task_struct *t) | |||
| 165 | 187 | ||
| 166 | if (t->sched_info.last_queued) | 188 | if (t->sched_info.last_queued) |
| 167 | delta = now - t->sched_info.last_queued; | 189 | delta = now - t->sched_info.last_queued; |
| 168 | sched_info_dequeued(t); | 190 | sched_info_reset_dequeued(t); |
| 169 | t->sched_info.run_delay += delta; | 191 | t->sched_info.run_delay += delta; |
| 170 | t->sched_info.last_arrival = now; | 192 | t->sched_info.last_arrival = now; |
| 171 | t->sched_info.pcount++; | 193 | t->sched_info.pcount++; |
| @@ -242,7 +264,9 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next) | |||
| 242 | __sched_info_switch(prev, next); | 264 | __sched_info_switch(prev, next); |
| 243 | } | 265 | } |
| 244 | #else | 266 | #else |
| 245 | #define sched_info_queued(t) do { } while (0) | 267 | #define sched_info_queued(t) do { } while (0) |
| 246 | #define sched_info_switch(t, next) do { } while (0) | 268 | #define sched_info_reset_dequeued(t) do { } while (0) |
| 269 | #define sched_info_dequeued(t) do { } while (0) | ||
| 270 | #define sched_info_switch(t, next) do { } while (0) | ||
| 247 | #endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */ | 271 | #endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */ |
| 248 | 272 | ||
