path: root/kernel/latencytop.c
author     Thomas Gleixner <tglx@linutronix.de>  2009-09-17 11:31:09 -0400
committer  Ingo Molnar <mingo@elte.hu>           2011-09-13 05:12:02 -0400
commit     757455d41c390da4ae8e06468ad097fe42eaab41 (patch)
tree       da0d9bbc29d38c4dae625d2503acaed3ae776302 /kernel/latencytop.c
parent     2737c49f29a29f3d3645ba0778aa7a8798f32249 (diff)
locking, latencytop: Annotate latency_lock as raw
The latency_lock can be taken in the guts of the scheduler code and therefore cannot be preempted on -rt - annotate it.

In mainline this change documents the low level nature of the lock - otherwise there's no functional difference. Lockdep and Sparse checking will work as usual.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
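For readers less familiar with the API involved, the locking pattern the patch applies is sketched below. This is an illustrative sketch only, not code from latencytop.c: the names sample_lock, sample_buf and record_sample are made up for the example. The point is that a raw_spinlock_t remains a true spinning lock even on PREEMPT_RT, so it can be taken from non-preemptible scheduler context, whereas an ordinary spinlock_t is converted to a sleeping lock there.

#include <linux/spinlock.h>

/* Illustrative only; these names are hypothetical, not from latencytop.c. */
static DEFINE_RAW_SPINLOCK(sample_lock);
static int sample_buf[16];

static void record_sample(int idx, int val)
{
	unsigned long flags;

	/* Disables local interrupts and spins; never sleeps, even on -rt. */
	raw_spin_lock_irqsave(&sample_lock, flags);
	if (idx >= 0 && idx < 16)
		sample_buf[idx] = val;
	raw_spin_unlock_irqrestore(&sample_lock, flags);
}

The patch below makes exactly this substitution: DEFINE_SPINLOCK becomes DEFINE_RAW_SPINLOCK, and the spin_lock_irqsave/spin_unlock_irqrestore calls become their raw_ counterparts.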
Diffstat (limited to 'kernel/latencytop.c')
-rw-r--r--  kernel/latencytop.c | 14
1 file changed, 7 insertions, 7 deletions
diff --git a/kernel/latencytop.c b/kernel/latencytop.c
index 376066e1041..4ac8ebfcab5 100644
--- a/kernel/latencytop.c
+++ b/kernel/latencytop.c
@@ -58,7 +58,7 @@
 #include <linux/list.h>
 #include <linux/stacktrace.h>
 
-static DEFINE_SPINLOCK(latency_lock);
+static DEFINE_RAW_SPINLOCK(latency_lock);
 
 #define MAXLR 128
 static struct latency_record latency_record[MAXLR];
@@ -72,19 +72,19 @@ void clear_all_latency_tracing(struct task_struct *p)
 	if (!latencytop_enabled)
 		return;
 
-	spin_lock_irqsave(&latency_lock, flags);
+	raw_spin_lock_irqsave(&latency_lock, flags);
 	memset(&p->latency_record, 0, sizeof(p->latency_record));
 	p->latency_record_count = 0;
-	spin_unlock_irqrestore(&latency_lock, flags);
+	raw_spin_unlock_irqrestore(&latency_lock, flags);
 }
 
 static void clear_global_latency_tracing(void)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&latency_lock, flags);
+	raw_spin_lock_irqsave(&latency_lock, flags);
 	memset(&latency_record, 0, sizeof(latency_record));
-	spin_unlock_irqrestore(&latency_lock, flags);
+	raw_spin_unlock_irqrestore(&latency_lock, flags);
 }
 
 static void __sched
@@ -190,7 +190,7 @@ __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
 	lat.max = usecs;
 	store_stacktrace(tsk, &lat);
 
-	spin_lock_irqsave(&latency_lock, flags);
+	raw_spin_lock_irqsave(&latency_lock, flags);
 
 	account_global_scheduler_latency(tsk, &lat);
 
@@ -231,7 +231,7 @@ __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
 	memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));
 
 out_unlock:
-	spin_unlock_irqrestore(&latency_lock, flags);
+	raw_spin_unlock_irqrestore(&latency_lock, flags);
 }
 
 static int lstats_show(struct seq_file *m, void *v)