diff options
 kernel/sched_stats.h | 20 +-------------------
 1 file changed, 1 insertion(+), 19 deletions(-)
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 25c2f962f6fc..48ddf431db0e 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -157,15 +157,7 @@ static inline void sched_info_reset_dequeued(struct task_struct *t)
 }
 
 /*
- * Called when a process is dequeued from the active array and given
- * the cpu.  We should note that with the exception of interactive
- * tasks, the expired queue will become the active queue after the active
- * queue is empty, without explicitly dequeuing and requeuing tasks in the
- * expired queue.  (Interactive tasks may be requeued directly to the
- * active queue, thus delaying tasks in the expired queue from running;
- * see scheduler_tick()).
- *
- * Though we are interested in knowing how long it was from the *first* time a
+ * We are interested in knowing how long it was from the *first* time a
  * task was queued to the time that it finally hit a cpu, we call this routine
  * from dequeue_task() to account for possible rq->clock skew across cpus. The
  * delta taken on each cpu would annul the skew.
@@ -203,16 +195,6 @@ static void sched_info_arrive(struct task_struct *t)
 }
 
 /*
- * Called when a process is queued into either the active or expired
- * array.  The time is noted and later used to determine how long we
- * had to wait for us to reach the cpu.  Since the expired queue will
- * become the active queue after active queue is empty, without dequeuing
- * and requeuing any tasks, we are interested in queuing to either.  It
- * is unusual but not impossible for tasks to be dequeued and immediately
- * requeued in the same or another array: this can happen in sched_yield(),
- * set_user_nice(), and even load_balance() as it moves tasks from runqueue
- * to runqueue.
- *
  * This function is only called from enqueue_task(), but also only updates
  * the timestamp if it is already not set. It's assumed that
  * sched_info_dequeued() will clear that stamp when appropriate.
