diff options
author | Thomas Gleixner <tglx@linutronix.de> | 2010-02-24 03:50:22 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2011-09-13 05:11:57 -0400 |
commit | 8292c9e15c3b069459794a04f5e2cf0d5665ddc4 (patch) | |
tree | 6dd98e626c2dc5fc38a7838295039226b301ee57 /kernel | |
parent | ee30a7b2fc072f139dac44826860d2c1f422137c (diff) |
locking, semaphores: Annotate inner lock as raw
There is no reason to have the spin_lock protecting the semaphore
preemptible on -rt. Annotate it as a raw_spinlock.
In mainline this change documents the low level nature of
the lock - otherwise there's no functional difference. Lockdep
and Sparse checking will work as usual.
( On rt this also solves lockdep complaining about the
rt_mutex.wait_lock being not initialized. )
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/semaphore.c | 28 |
1 file changed, 14 insertions, 14 deletions
diff --git a/kernel/semaphore.c b/kernel/semaphore.c index 94a62c0d4ade..d831841e55a7 100644 --- a/kernel/semaphore.c +++ b/kernel/semaphore.c | |||
@@ -54,12 +54,12 @@ void down(struct semaphore *sem) | |||
54 | { | 54 | { |
55 | unsigned long flags; | 55 | unsigned long flags; |
56 | 56 | ||
57 | spin_lock_irqsave(&sem->lock, flags); | 57 | raw_spin_lock_irqsave(&sem->lock, flags); |
58 | if (likely(sem->count > 0)) | 58 | if (likely(sem->count > 0)) |
59 | sem->count--; | 59 | sem->count--; |
60 | else | 60 | else |
61 | __down(sem); | 61 | __down(sem); |
62 | spin_unlock_irqrestore(&sem->lock, flags); | 62 | raw_spin_unlock_irqrestore(&sem->lock, flags); |
63 | } | 63 | } |
64 | EXPORT_SYMBOL(down); | 64 | EXPORT_SYMBOL(down); |
65 | 65 | ||
@@ -77,12 +77,12 @@ int down_interruptible(struct semaphore *sem) | |||
77 | unsigned long flags; | 77 | unsigned long flags; |
78 | int result = 0; | 78 | int result = 0; |
79 | 79 | ||
80 | spin_lock_irqsave(&sem->lock, flags); | 80 | raw_spin_lock_irqsave(&sem->lock, flags); |
81 | if (likely(sem->count > 0)) | 81 | if (likely(sem->count > 0)) |
82 | sem->count--; | 82 | sem->count--; |
83 | else | 83 | else |
84 | result = __down_interruptible(sem); | 84 | result = __down_interruptible(sem); |
85 | spin_unlock_irqrestore(&sem->lock, flags); | 85 | raw_spin_unlock_irqrestore(&sem->lock, flags); |
86 | 86 | ||
87 | return result; | 87 | return result; |
88 | } | 88 | } |
@@ -103,12 +103,12 @@ int down_killable(struct semaphore *sem) | |||
103 | unsigned long flags; | 103 | unsigned long flags; |
104 | int result = 0; | 104 | int result = 0; |
105 | 105 | ||
106 | spin_lock_irqsave(&sem->lock, flags); | 106 | raw_spin_lock_irqsave(&sem->lock, flags); |
107 | if (likely(sem->count > 0)) | 107 | if (likely(sem->count > 0)) |
108 | sem->count--; | 108 | sem->count--; |
109 | else | 109 | else |
110 | result = __down_killable(sem); | 110 | result = __down_killable(sem); |
111 | spin_unlock_irqrestore(&sem->lock, flags); | 111 | raw_spin_unlock_irqrestore(&sem->lock, flags); |
112 | 112 | ||
113 | return result; | 113 | return result; |
114 | } | 114 | } |
@@ -132,11 +132,11 @@ int down_trylock(struct semaphore *sem) | |||
132 | unsigned long flags; | 132 | unsigned long flags; |
133 | int count; | 133 | int count; |
134 | 134 | ||
135 | spin_lock_irqsave(&sem->lock, flags); | 135 | raw_spin_lock_irqsave(&sem->lock, flags); |
136 | count = sem->count - 1; | 136 | count = sem->count - 1; |
137 | if (likely(count >= 0)) | 137 | if (likely(count >= 0)) |
138 | sem->count = count; | 138 | sem->count = count; |
139 | spin_unlock_irqrestore(&sem->lock, flags); | 139 | raw_spin_unlock_irqrestore(&sem->lock, flags); |
140 | 140 | ||
141 | return (count < 0); | 141 | return (count < 0); |
142 | } | 142 | } |
@@ -157,12 +157,12 @@ int down_timeout(struct semaphore *sem, long jiffies) | |||
157 | unsigned long flags; | 157 | unsigned long flags; |
158 | int result = 0; | 158 | int result = 0; |
159 | 159 | ||
160 | spin_lock_irqsave(&sem->lock, flags); | 160 | raw_spin_lock_irqsave(&sem->lock, flags); |
161 | if (likely(sem->count > 0)) | 161 | if (likely(sem->count > 0)) |
162 | sem->count--; | 162 | sem->count--; |
163 | else | 163 | else |
164 | result = __down_timeout(sem, jiffies); | 164 | result = __down_timeout(sem, jiffies); |
165 | spin_unlock_irqrestore(&sem->lock, flags); | 165 | raw_spin_unlock_irqrestore(&sem->lock, flags); |
166 | 166 | ||
167 | return result; | 167 | return result; |
168 | } | 168 | } |
@@ -179,12 +179,12 @@ void up(struct semaphore *sem) | |||
179 | { | 179 | { |
180 | unsigned long flags; | 180 | unsigned long flags; |
181 | 181 | ||
182 | spin_lock_irqsave(&sem->lock, flags); | 182 | raw_spin_lock_irqsave(&sem->lock, flags); |
183 | if (likely(list_empty(&sem->wait_list))) | 183 | if (likely(list_empty(&sem->wait_list))) |
184 | sem->count++; | 184 | sem->count++; |
185 | else | 185 | else |
186 | __up(sem); | 186 | __up(sem); |
187 | spin_unlock_irqrestore(&sem->lock, flags); | 187 | raw_spin_unlock_irqrestore(&sem->lock, flags); |
188 | } | 188 | } |
189 | EXPORT_SYMBOL(up); | 189 | EXPORT_SYMBOL(up); |
190 | 190 | ||
@@ -217,9 +217,9 @@ static inline int __sched __down_common(struct semaphore *sem, long state, | |||
217 | if (timeout <= 0) | 217 | if (timeout <= 0) |
218 | goto timed_out; | 218 | goto timed_out; |
219 | __set_task_state(task, state); | 219 | __set_task_state(task, state); |
220 | spin_unlock_irq(&sem->lock); | 220 | raw_spin_unlock_irq(&sem->lock); |
221 | timeout = schedule_timeout(timeout); | 221 | timeout = schedule_timeout(timeout); |
222 | spin_lock_irq(&sem->lock); | 222 | raw_spin_lock_irq(&sem->lock); |
223 | if (waiter.up) | 223 | if (waiter.up) |
224 | return 0; | 224 | return 0; |
225 | } | 225 | } |