Diffstat (limited to 'kernel/sched/stats.h')
-rw-r--r--   kernel/sched/stats.h   231
1 files changed, 231 insertions, 0 deletions
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
new file mode 100644
index 000000000000..2ef90a51ec5e
--- /dev/null
+++ b/kernel/sched/stats.h
@@ -0,0 +1,231 @@
1 | |||
2 | #ifdef CONFIG_SCHEDSTATS | ||
3 | |||
4 | /* | ||
5 | * Expects runqueue lock to be held for atomicity of update | ||
6 | */ | ||
7 | static inline void | ||
8 | rq_sched_info_arrive(struct rq *rq, unsigned long long delta) | ||
9 | { | ||
10 | if (rq) { | ||
11 | rq->rq_sched_info.run_delay += delta; | ||
12 | rq->rq_sched_info.pcount++; | ||
13 | } | ||
14 | } | ||
15 | |||
16 | /* | ||
17 | * Expects runqueue lock to be held for atomicity of update | ||
18 | */ | ||
19 | static inline void | ||
20 | rq_sched_info_depart(struct rq *rq, unsigned long long delta) | ||
21 | { | ||
22 | if (rq) | ||
23 | rq->rq_cpu_time += delta; | ||
24 | } | ||
25 | |||
26 | static inline void | ||
27 | rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) | ||
28 | { | ||
29 | if (rq) | ||
30 | rq->rq_sched_info.run_delay += delta; | ||
31 | } | ||
32 | # define schedstat_inc(rq, field) do { (rq)->field++; } while (0) | ||
33 | # define schedstat_add(rq, field, amt) do { (rq)->field += (amt); } while (0) | ||
34 | # define schedstat_set(var, val) do { var = (val); } while (0) | ||
35 | #else /* !CONFIG_SCHEDSTATS */ | ||
36 | static inline void | ||
37 | rq_sched_info_arrive(struct rq *rq, unsigned long long delta) | ||
38 | {} | ||
39 | static inline void | ||
40 | rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) | ||
41 | {} | ||
42 | static inline void | ||
43 | rq_sched_info_depart(struct rq *rq, unsigned long long delta) | ||
44 | {} | ||
45 | # define schedstat_inc(rq, field) do { } while (0) | ||
46 | # define schedstat_add(rq, field, amt) do { } while (0) | ||
47 | # define schedstat_set(var, val) do { } while (0) | ||
48 | #endif | ||
49 | |||
50 | #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) | ||
51 | static inline void sched_info_reset_dequeued(struct task_struct *t) | ||
52 | { | ||
53 | t->sched_info.last_queued = 0; | ||
54 | } | ||
55 | |||
56 | /* | ||
57 | * We are interested in knowing how long it was from the *first* time a | ||
58 | * task was queued to the time that it finally hit a cpu, we call this routine | ||
59 | * from dequeue_task() to account for possible rq->clock skew across cpus. The | ||
60 | * delta taken on each cpu would annul the skew. | ||
61 | */ | ||
62 | static inline void sched_info_dequeued(struct task_struct *t) | ||
63 | { | ||
64 | unsigned long long now = task_rq(t)->clock, delta = 0; | ||
65 | |||
66 | if (unlikely(sched_info_on())) | ||
67 | if (t->sched_info.last_queued) | ||
68 | delta = now - t->sched_info.last_queued; | ||
69 | sched_info_reset_dequeued(t); | ||
70 | t->sched_info.run_delay += delta; | ||
71 | |||
72 | rq_sched_info_dequeued(task_rq(t), delta); | ||
73 | } | ||
74 | |||
75 | /* | ||
76 | * Called when a task finally hits the cpu. We can now calculate how | ||
77 | * long it was waiting to run. We also note when it began so that we | ||
78 | * can keep stats on how long its timeslice is. | ||
79 | */ | ||
80 | static void sched_info_arrive(struct task_struct *t) | ||
81 | { | ||
82 | unsigned long long now = task_rq(t)->clock, delta = 0; | ||
83 | |||
84 | if (t->sched_info.last_queued) | ||
85 | delta = now - t->sched_info.last_queued; | ||
86 | sched_info_reset_dequeued(t); | ||
87 | t->sched_info.run_delay += delta; | ||
88 | t->sched_info.last_arrival = now; | ||
89 | t->sched_info.pcount++; | ||
90 | |||
91 | rq_sched_info_arrive(task_rq(t), delta); | ||
92 | } | ||
93 | |||
94 | /* | ||
95 | * This function is only called from enqueue_task(), but also only updates | ||
96 | * the timestamp if it is already not set. It's assumed that | ||
97 | * sched_info_dequeued() will clear that stamp when appropriate. | ||
98 | */ | ||
99 | static inline void sched_info_queued(struct task_struct *t) | ||
100 | { | ||
101 | if (unlikely(sched_info_on())) | ||
102 | if (!t->sched_info.last_queued) | ||
103 | t->sched_info.last_queued = task_rq(t)->clock; | ||
104 | } | ||
105 | |||
106 | /* | ||
107 | * Called when a process ceases being the active-running process, either | ||
108 | * voluntarily or involuntarily. Now we can calculate how long we ran. | ||
109 | * Also, if the process is still in the TASK_RUNNING state, call | ||
110 | * sched_info_queued() to mark that it has now again started waiting on | ||
111 | * the runqueue. | ||
112 | */ | ||
113 | static inline void sched_info_depart(struct task_struct *t) | ||
114 | { | ||
115 | unsigned long long delta = task_rq(t)->clock - | ||
116 | t->sched_info.last_arrival; | ||
117 | |||
118 | rq_sched_info_depart(task_rq(t), delta); | ||
119 | |||
120 | if (t->state == TASK_RUNNING) | ||
121 | sched_info_queued(t); | ||
122 | } | ||
123 | |||
124 | /* | ||
125 | * Called when tasks are switched involuntarily due, typically, to expiring | ||
126 | * their time slice. (This may also be called when switching to or from | ||
127 | * the idle task.) We are only called when prev != next. | ||
128 | */ | ||
129 | static inline void | ||
130 | __sched_info_switch(struct task_struct *prev, struct task_struct *next) | ||
131 | { | ||
132 | struct rq *rq = task_rq(prev); | ||
133 | |||
134 | /* | ||
135 | * prev now departs the cpu. It's not interesting to record | ||
136 | * stats about how efficient we were at scheduling the idle | ||
137 | * process, however. | ||
138 | */ | ||
139 | if (prev != rq->idle) | ||
140 | sched_info_depart(prev); | ||
141 | |||
142 | if (next != rq->idle) | ||
143 | sched_info_arrive(next); | ||
144 | } | ||
145 | static inline void | ||
146 | sched_info_switch(struct task_struct *prev, struct task_struct *next) | ||
147 | { | ||
148 | if (unlikely(sched_info_on())) | ||
149 | __sched_info_switch(prev, next); | ||
150 | } | ||
151 | #else | ||
152 | #define sched_info_queued(t) do { } while (0) | ||
153 | #define sched_info_reset_dequeued(t) do { } while (0) | ||
154 | #define sched_info_dequeued(t) do { } while (0) | ||
155 | #define sched_info_switch(t, next) do { } while (0) | ||
156 | #endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */ | ||
157 | |||
158 | /* | ||
159 | * The following are functions that support scheduler-internal time accounting. | ||
160 | * These functions are generally called at the timer tick. None of this depends | ||
161 | * on CONFIG_SCHEDSTATS. | ||
162 | */ | ||
163 | |||
164 | /** | ||
165 | * account_group_user_time - Maintain utime for a thread group. | ||
166 | * | ||
167 | * @tsk: Pointer to task structure. | ||
168 | * @cputime: Time value by which to increment the utime field of the | ||
169 | * thread_group_cputime structure. | ||
170 | * | ||
171 | * If thread group time is being maintained, get the structure for the | ||
172 | * running CPU and update the utime field there. | ||
173 | */ | ||
174 | static inline void account_group_user_time(struct task_struct *tsk, | ||
175 | cputime_t cputime) | ||
176 | { | ||
177 | struct thread_group_cputimer *cputimer = &tsk->signal->cputimer; | ||
178 | |||
179 | if (!cputimer->running) | ||
180 | return; | ||
181 | |||
182 | raw_spin_lock(&cputimer->lock); | ||
183 | cputimer->cputime.utime += cputime; | ||
184 | raw_spin_unlock(&cputimer->lock); | ||
185 | } | ||
186 | |||
187 | /** | ||
188 | * account_group_system_time - Maintain stime for a thread group. | ||
189 | * | ||
190 | * @tsk: Pointer to task structure. | ||
191 | * @cputime: Time value by which to increment the stime field of the | ||
192 | * thread_group_cputime structure. | ||
193 | * | ||
194 | * If thread group time is being maintained, get the structure for the | ||
195 | * running CPU and update the stime field there. | ||
196 | */ | ||
197 | static inline void account_group_system_time(struct task_struct *tsk, | ||
198 | cputime_t cputime) | ||
199 | { | ||
200 | struct thread_group_cputimer *cputimer = &tsk->signal->cputimer; | ||
201 | |||
202 | if (!cputimer->running) | ||
203 | return; | ||
204 | |||
205 | raw_spin_lock(&cputimer->lock); | ||
206 | cputimer->cputime.stime += cputime; | ||
207 | raw_spin_unlock(&cputimer->lock); | ||
208 | } | ||
209 | |||
210 | /** | ||
211 | * account_group_exec_runtime - Maintain exec runtime for a thread group. | ||
212 | * | ||
213 | * @tsk: Pointer to task structure. | ||
214 | * @ns: Time value by which to increment the sum_exec_runtime field | ||
215 | * of the thread_group_cputime structure. | ||
216 | * | ||
217 | * If thread group time is being maintained, get the structure for the | ||
218 | * running CPU and update the sum_exec_runtime field there. | ||
219 | */ | ||
220 | static inline void account_group_exec_runtime(struct task_struct *tsk, | ||
221 | unsigned long long ns) | ||
222 | { | ||
223 | struct thread_group_cputimer *cputimer = &tsk->signal->cputimer; | ||
224 | |||
225 | if (!cputimer->running) | ||
226 | return; | ||
227 | |||
228 | raw_spin_lock(&cputimer->lock); | ||
229 | cputimer->cputime.sum_exec_runtime += ns; | ||
230 | raw_spin_unlock(&cputimer->lock); | ||
231 | } | ||
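
For readers outside the kernel tree, the delay-accounting pattern in sched_info_queued()/sched_info_arrive()/sched_info_depart() above can be illustrated with a small self-contained sketch. Everything below is hypothetical userspace code (the mock_* names, the monotonic-clock helper), not the kernel API; it only mirrors the bookkeeping: stamp last_queued once when a task becomes runnable, add now - last_queued to run_delay when it gets the CPU, and stamp last_arrival so the eventual depart can measure how long it ran.

/* Hypothetical userspace mock of the sched_info bookkeeping above. */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct mock_sched_info {
        uint64_t last_queued;   /* set once when the task becomes runnable */
        uint64_t last_arrival;  /* set when the task gets the CPU          */
        uint64_t run_delay;     /* total time spent waiting on a runqueue  */
        uint64_t pcount;        /* number of times the task ran            */
};

static uint64_t now_ns(void)
{
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

/* Mirrors sched_info_queued(): stamp only if not already stamped. */
static void mock_queued(struct mock_sched_info *si)
{
        if (!si->last_queued)
                si->last_queued = now_ns();
}

/* Mirrors sched_info_arrive(): wait time is now - last_queued. */
static void mock_arrive(struct mock_sched_info *si)
{
        uint64_t now = now_ns(), delta = 0;

        if (si->last_queued)
                delta = now - si->last_queued;
        si->last_queued = 0;            /* like sched_info_reset_dequeued() */
        si->run_delay += delta;
        si->last_arrival = now;
        si->pcount++;
}

/* Mirrors sched_info_depart(): CPU time used is now - last_arrival. */
static uint64_t mock_depart(struct mock_sched_info *si)
{
        return now_ns() - si->last_arrival;
}

int main(void)
{
        struct mock_sched_info si = { 0 };
        uint64_t ran;

        mock_queued(&si);               /* task enqueued               */
        mock_arrive(&si);               /* task picked to run          */
        ran = mock_depart(&si);         /* task switched out           */
        printf("run_delay=%llu ns, ran=%llu ns, pcount=%llu\n",
               (unsigned long long)si.run_delay,
               (unsigned long long)ran,
               (unsigned long long)si.pcount);
        return 0;
}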
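The three account_group_*() helpers all share one shape: return cheaply if group-wide timing is not running, otherwise take the cputimer lock and add the sample to the shared total, so the per-tick cost in the common case is a single load and branch. A hedged userspace analogue of that shape (hypothetical names, with a pthread spinlock standing in for raw_spin_lock) might look like:

/* Hypothetical analogue of the account_group_*() pattern above. */
#include <pthread.h>
#include <stdint.h>

struct mock_group_cputimer {
        int running;                    /* group-wide accounting enabled? */
        pthread_spinlock_t lock;        /* stands in for raw_spin_lock    */
        uint64_t utime;
        uint64_t stime;
        uint64_t sum_exec_runtime;
};

/*
 * Same shape as account_group_user_time(): check the enable flag
 * without the lock, then add the sample under the lock.
 */
static inline void mock_account_user_time(struct mock_group_cputimer *gt,
                                          uint64_t cputime)
{
        if (!gt->running)
                return;

        pthread_spin_lock(&gt->lock);
        gt->utime += cputime;
        pthread_spin_unlock(&gt->lock);
}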