path: root/kernel/sched_stats.h
author		Frank Mayhar <fmayhar@google.com>	2008-09-12 12:54:39 -0400
committer	Ingo Molnar <mingo@elte.hu>		2008-09-23 07:38:44 -0400
commit		bb34d92f643086d546b49cef680f6f305ed84414 (patch)
tree		275887040c96971e133fa20d99517c1fcea76415 /kernel/sched_stats.h
parent		5ce73a4a5a4893a1aa4cdeed1b1a5a6de42c43b6 (diff)
timers: fix itimer/many thread hang, v2
This is the second resubmission of the posix timer rework patch, posted a few days ago. It includes the changes from the previous resubmission, which addressed Oleg Nesterov's comments by removing the RCU code from the patch and un-inlining the thread_group_cputime() function for SMP.

In addition, per Ingo Molnar, it simplifies the UP code, consolidating much of it with the SMP version and relying on the lower-level SMP/UP handling to take care of the differences. It also cleans up some UP compile errors, moves the scheduler stats-related macros into kernel/sched_stats.h, cleans up a merge error in kernel/fork.c, and has a few other minor fixes and cleanups as suggested by Oleg and Ingo. Thanks for the review, guys.

Signed-off-by: Frank Mayhar <fmayhar@google.com>
Cc: Roland McGrath <roland@redhat.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
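For context, the helpers added in the hunk below operate on a thread_group_cputime structure whose "totals" field refers to per-CPU task_cputime entries on SMP. Those definitions live in include/linux/sched.h rather than in this file; the following is only a sketch reconstructed from how the hunk uses the fields, not the exact layout from the patch:

	/* Sketch only: approximate shape of the structures used below. */
	struct task_cputime {
		cputime_t		utime;			/* thread-group user time */
		cputime_t		stime;			/* thread-group system time */
		unsigned long long	sum_exec_runtime;	/* scheduler runtime, in ns */
	};

	struct thread_group_cputime {
		/* On SMP this points at a per-CPU array (alloc_percpu());
		 * on UP it refers to a single shared instance. */
		struct task_cputime	*totals;
	};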
Diffstat (limited to 'kernel/sched_stats.h')
-rw-r--r--	kernel/sched_stats.h	136
1 file changed, 136 insertions(+), 0 deletions(-)
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 8385d43987e2..d6903bd0c7a8 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -270,3 +270,139 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next)
 #define sched_info_switch(t, next)	do { } while (0)
 #endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
 
+/*
+ * The following are functions that support scheduler-internal time accounting.
+ * These functions are generally called at the timer tick. None of this depends
+ * on CONFIG_SCHEDSTATS.
+ */
+
+#ifdef CONFIG_SMP
+
+/**
+ * thread_group_cputime_account_user - Maintain utime for a thread group.
+ *
+ * @tgtimes:	Pointer to thread_group_cputime structure.
+ * @cputime:	Time value by which to increment the utime field of that
+ *		structure.
+ *
+ * If thread group time is being maintained, get the structure for the
+ * running CPU and update the utime field there.
+ */
+static inline void thread_group_cputime_account_user(
+					struct thread_group_cputime *tgtimes,
+					cputime_t cputime)
+{
+	if (tgtimes->totals) {
+		struct task_cputime *times;
+
+		times = per_cpu_ptr(tgtimes->totals, get_cpu());
+		times->utime = cputime_add(times->utime, cputime);
+		put_cpu_no_resched();
+	}
+}
+
+/**
+ * thread_group_cputime_account_system - Maintain stime for a thread group.
+ *
+ * @tgtimes:	Pointer to thread_group_cputime structure.
+ * @cputime:	Time value by which to increment the stime field of that
+ *		structure.
+ *
+ * If thread group time is being maintained, get the structure for the
+ * running CPU and update the stime field there.
+ */
+static inline void thread_group_cputime_account_system(
+					struct thread_group_cputime *tgtimes,
+					cputime_t cputime)
+{
+	if (tgtimes->totals) {
+		struct task_cputime *times;
+
+		times = per_cpu_ptr(tgtimes->totals, get_cpu());
+		times->stime = cputime_add(times->stime, cputime);
+		put_cpu_no_resched();
+	}
+}
+
+/**
+ * thread_group_cputime_account_exec_runtime - Maintain exec runtime for a
+ *						thread group.
+ *
+ * @tgtimes:	Pointer to thread_group_cputime structure.
+ * @ns:		Time value by which to increment the sum_exec_runtime field
+ *		of that structure.
+ *
+ * If thread group time is being maintained, get the structure for the
+ * running CPU and update the sum_exec_runtime field there.
+ */
+static inline void thread_group_cputime_account_exec_runtime(
+					struct thread_group_cputime *tgtimes,
+					unsigned long long ns)
+{
+	if (tgtimes->totals) {
+		struct task_cputime *times;
+
+		times = per_cpu_ptr(tgtimes->totals, get_cpu());
+		times->sum_exec_runtime += ns;
+		put_cpu_no_resched();
+	}
+}
+
+#else /* CONFIG_SMP */
+
+static inline void thread_group_cputime_account_user(
+					struct thread_group_cputime *tgtimes,
+					cputime_t cputime)
+{
+	tgtimes->totals->utime = cputime_add(tgtimes->totals->utime, cputime);
+}
+
+static inline void thread_group_cputime_account_system(
+					struct thread_group_cputime *tgtimes,
+					cputime_t cputime)
+{
+	tgtimes->totals->stime = cputime_add(tgtimes->totals->stime, cputime);
+}
+
+static inline void thread_group_cputime_account_exec_runtime(
+					struct thread_group_cputime *tgtimes,
+					unsigned long long ns)
+{
+	tgtimes->totals->sum_exec_runtime += ns;
+}
+
+#endif /* CONFIG_SMP */
+
+/*
+ * These are the generic time-accounting routines that use the above
+ * functions. They are the functions actually called by the scheduler.
+ */
+static inline void account_group_user_time(struct task_struct *tsk,
+					   cputime_t cputime)
+{
+	struct signal_struct *sig;
+
+	sig = tsk->signal;
+	if (likely(sig))
+		thread_group_cputime_account_user(&sig->cputime, cputime);
+}
+
+static inline void account_group_system_time(struct task_struct *tsk,
+					     cputime_t cputime)
+{
+	struct signal_struct *sig;
+
+	sig = tsk->signal;
+	if (likely(sig))
+		thread_group_cputime_account_system(&sig->cputime, cputime);
+}
+
+static inline void account_group_exec_runtime(struct task_struct *tsk,
+					       unsigned long long ns)
+{
+	struct signal_struct *sig;
+
+	sig = tsk->signal;
+	if (likely(sig))
+		thread_group_cputime_account_exec_runtime(&sig->cputime, ns);
+}
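The account_group_*() wrappers above are the only interface the rest of the scheduler needs; the SMP/UP split and the per-CPU bookkeeping stay hidden behind the thread_group_cputime_account_*() helpers. As a rough, illustrative sketch (not part of this diff; the actual call sites and signatures belong to other patches in the series), the tick-time accounting path in kernel/sched.c ends up folding each tick into the thread-group totals along these lines:

	/* Illustrative only: how per-task tick accounting might also feed the
	 * thread-group totals, so itimer/RLIMIT_CPU expiry checks can read a
	 * summed value instead of walking every thread in the group. */
	void account_user_time(struct task_struct *p, cputime_t cputime)
	{
		/* existing per-task accounting */
		p->utime = cputime_add(p->utime, cputime);

		/* new in this series: accumulate into the shared group totals */
		account_group_user_time(p, cputime);

		/* ... cpustat / kernel statistics updates elided ... */
	}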