diff options
author | Thomas Gleixner <tglx@linutronix.de> | 2009-07-25 12:56:56 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2011-09-13 05:11:55 -0400 |
commit | ee30a7b2fc072f139dac44826860d2c1f422137c (patch) | |
tree | 4f11173879b25cc31822cb0a8d9dbd48395e74bc /kernel/posix-cpu-timers.c | |
parent | 07354eb1a74d1e1ece29f8bafe0b46e8c77a95ef (diff) |
locking, sched: Annotate thread_group_cputimer as raw
The thread_group_cputimer lock can be taken in atomic context and therefore
cannot be preempted on -rt - annotate it.
In mainline this change documents the low level nature of
the lock - otherwise there's no functional difference. Lockdep
and Sparse checking will work as usual.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/posix-cpu-timers.c')
-rw-r--r-- | kernel/posix-cpu-timers.c | 12 |
1 file changed, 6 insertions, 6 deletions
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index 58f405b581e7..41440cca55a2 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c | |||
@@ -274,7 +274,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times) | |||
274 | struct task_cputime sum; | 274 | struct task_cputime sum; |
275 | unsigned long flags; | 275 | unsigned long flags; |
276 | 276 | ||
277 | spin_lock_irqsave(&cputimer->lock, flags); | 277 | raw_spin_lock_irqsave(&cputimer->lock, flags); |
278 | if (!cputimer->running) { | 278 | if (!cputimer->running) { |
279 | cputimer->running = 1; | 279 | cputimer->running = 1; |
280 | /* | 280 | /* |
@@ -287,7 +287,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times) | |||
287 | update_gt_cputime(&cputimer->cputime, &sum); | 287 | update_gt_cputime(&cputimer->cputime, &sum); |
288 | } | 288 | } |
289 | *times = cputimer->cputime; | 289 | *times = cputimer->cputime; |
290 | spin_unlock_irqrestore(&cputimer->lock, flags); | 290 | raw_spin_unlock_irqrestore(&cputimer->lock, flags); |
291 | } | 291 | } |
292 | 292 | ||
293 | /* | 293 | /* |
@@ -997,9 +997,9 @@ static void stop_process_timers(struct signal_struct *sig) | |||
997 | struct thread_group_cputimer *cputimer = &sig->cputimer; | 997 | struct thread_group_cputimer *cputimer = &sig->cputimer; |
998 | unsigned long flags; | 998 | unsigned long flags; |
999 | 999 | ||
1000 | spin_lock_irqsave(&cputimer->lock, flags); | 1000 | raw_spin_lock_irqsave(&cputimer->lock, flags); |
1001 | cputimer->running = 0; | 1001 | cputimer->running = 0; |
1002 | spin_unlock_irqrestore(&cputimer->lock, flags); | 1002 | raw_spin_unlock_irqrestore(&cputimer->lock, flags); |
1003 | } | 1003 | } |
1004 | 1004 | ||
1005 | static u32 onecputick; | 1005 | static u32 onecputick; |
@@ -1289,9 +1289,9 @@ static inline int fastpath_timer_check(struct task_struct *tsk) | |||
1289 | if (sig->cputimer.running) { | 1289 | if (sig->cputimer.running) { |
1290 | struct task_cputime group_sample; | 1290 | struct task_cputime group_sample; |
1291 | 1291 | ||
1292 | spin_lock(&sig->cputimer.lock); | 1292 | raw_spin_lock(&sig->cputimer.lock); |
1293 | group_sample = sig->cputimer.cputime; | 1293 | group_sample = sig->cputimer.cputime; |
1294 | spin_unlock(&sig->cputimer.lock); | 1294 | raw_spin_unlock(&sig->cputimer.lock); |
1295 | 1295 | ||
1296 | if (task_cputime_expired(&group_sample, &sig->cputime_expires)) | 1296 | if (task_cputime_expired(&group_sample, &sig->cputime_expires)) |
1297 | return 1; | 1297 | return 1; |