Diffstat (limited to 'kernel/semaphore.c')
-rw-r--r--   kernel/semaphore.c | 78
1 file changed, 24 insertions, 54 deletions
diff --git a/kernel/semaphore.c b/kernel/semaphore.c
index 5a12a8558982..bef977b16966 100644
--- a/kernel/semaphore.c
+++ b/kernel/semaphore.c
@@ -18,18 +18,8 @@
  * down_trylock() and up() can be called from interrupt context.
  * So we have to disable interrupts when taking the lock.
  *
- * The ->count variable, if positive, defines how many more tasks can
- * acquire the semaphore. If negative, it represents how many tasks are
- * waiting on the semaphore (*). If zero, no tasks are waiting, and no more
- * tasks can acquire the semaphore.
- *
- * (*) Except for the window between one task calling up() and the task
- * sleeping in a __down_common() waking up. In order to avoid a third task
- * coming in and stealing the second task's wakeup, we leave the ->count
- * negative. If we have a more complex situation, the ->count may become
- * zero or negative (eg a semaphore with count = 2, three tasks attempt to
- * acquire it, one sleeps, two finish and call up(), the second task to call
- * up() notices that the list is empty and just increments count).
+ * The ->count variable defines how many more tasks can acquire the
+ * semaphore. If it's zero, there may be tasks waiting on the list.
  */
 
 static noinline void __down(struct semaphore *sem);
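For reference, the fields the new comment talks about all appear in the code below: sem->lock, sem->count and sem->wait_list. A minimal sketch of the structure they belong to, assuming the include/linux/semaphore.h layout of that era (the field types here are an assumption, not taken from this patch):

/* Sketch only -- see include/linux/semaphore.h for the real definition. */
struct semaphore {
	spinlock_t		lock;		/* protects count and wait_list */
	unsigned int		count;		/* how many more tasks may acquire */
	struct list_head	wait_list;	/* tasks sleeping in __down_common() */
};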
@@ -43,7 +33,9 @@ void down(struct semaphore *sem)
 	unsigned long flags;
 
 	spin_lock_irqsave(&sem->lock, flags);
-	if (unlikely(sem->count-- <= 0))
+	if (likely(sem->count > 0))
+		sem->count--;
+	else
 		__down(sem);
 	spin_unlock_irqrestore(&sem->lock, flags);
 }
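With this change the fast path in down() only decrements a positive count; a contended caller goes straight to __down() without ever pushing count negative. A hypothetical usage sketch of the public API this file implements (demo_* names are made up, not part of the patch):

/* Hypothetical caller, for illustration only. */
#include <linux/semaphore.h>

static struct semaphore demo_sem;

static void demo_setup(void)
{
	sema_init(&demo_sem, 1);	/* at most one holder at a time */
}

static void demo_critical_section(void)
{
	down(&demo_sem);		/* sleeps uninterruptibly while count is 0 */
	/* ... exclusive work ... */
	up(&demo_sem);			/* wakes one waiter, or bumps count back to 1 */
}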
@@ -55,7 +47,9 @@ int down_interruptible(struct semaphore *sem)
 	int result = 0;
 
 	spin_lock_irqsave(&sem->lock, flags);
-	if (unlikely(sem->count-- <= 0))
+	if (likely(sem->count > 0))
+		sem->count--;
+	else
 		result = __down_interruptible(sem);
 	spin_unlock_irqrestore(&sem->lock, flags);
 
@@ -69,7 +63,9 @@ int down_killable(struct semaphore *sem)
 	int result = 0;
 
 	spin_lock_irqsave(&sem->lock, flags);
-	if (unlikely(sem->count-- <= 0))
+	if (likely(sem->count > 0))
+		sem->count--;
+	else
 		result = __down_killable(sem);
 	spin_unlock_irqrestore(&sem->lock, flags);
 
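down_interruptible() and down_killable() get the same fast-path treatment; only the sleeping helper differs, and the return value still distinguishes success from an interrupted sleep. A hedged usage sketch (the demo_grab helper is hypothetical):

/* Hypothetical caller: bail out if a fatal signal interrupts the sleep. */
static int demo_grab(struct semaphore *sem)
{
	if (down_killable(sem))
		return -EINTR;		/* killed while waiting; semaphore not taken */
	/* ... */
	up(sem);
	return 0;
}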
@@ -111,7 +107,9 @@ int down_timeout(struct semaphore *sem, long jiffies)
 	int result = 0;
 
 	spin_lock_irqsave(&sem->lock, flags);
-	if (unlikely(sem->count-- <= 0))
+	if (likely(sem->count > 0))
+		sem->count--;
+	else
 		result = __down_timeout(sem, jiffies);
 	spin_unlock_irqrestore(&sem->lock, flags);
 
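down_timeout() takes the timeout in jiffies and reports -ETIME when it expires, which is exactly what the reworked __down_common() further down now returns directly. A small hedged example (the helper name is made up):

/* Hypothetical caller: wait up to 100ms for the semaphore. */
static int demo_grab_with_timeout(struct semaphore *sem)
{
	int err = down_timeout(sem, msecs_to_jiffies(100));

	if (err)			/* -ETIME: still contended after 100ms */
		return err;
	/* ... */
	up(sem);
	return 0;
}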
@@ -124,7 +122,7 @@ void up(struct semaphore *sem)
 	unsigned long flags;
 
 	spin_lock_irqsave(&sem->lock, flags);
-	if (likely(sem->count >= 0))
+	if (likely(list_empty(&sem->wait_list)))
 		sem->count++;
 	else
 		__up(sem);
@@ -141,22 +139,6 @@ struct semaphore_waiter {
 };
 
 /*
- * Wake up a process waiting on a semaphore. We need to call this from both
- * __up and __down_common as it's possible to race a task into the semaphore
- * if it comes in at just the right time between two tasks calling up() and
- * a third task waking up. This function assumes the wait_list is already
- * checked for being non-empty.
- */
-static noinline void __sched __up_down_common(struct semaphore *sem)
-{
-	struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
-						struct semaphore_waiter, list);
-	list_del(&waiter->list);
-	waiter->up = 1;
-	wake_up_process(waiter->task);
-}
-
-/*
  * Because this function is inlined, the 'state' parameter will be
  * constant, and thus optimised away by the compiler. Likewise the
  * 'timeout' parameter for the cases without timeouts.
@@ -164,7 +146,6 @@ static noinline void __sched __up_down_common(struct semaphore *sem)
 static inline int __sched __down_common(struct semaphore *sem, long state,
 								long timeout)
 {
-	int result = 0;
 	struct task_struct *task = current;
 	struct semaphore_waiter waiter;
 
@@ -184,28 +165,16 @@ static inline int __sched __down_common(struct semaphore *sem, long state,
 		timeout = schedule_timeout(timeout);
 		spin_lock_irq(&sem->lock);
 		if (waiter.up)
-			goto woken;
+			return 0;
 	}
 
  timed_out:
 	list_del(&waiter.list);
-	result = -ETIME;
-	goto woken;
+	return -ETIME;
+
  interrupted:
 	list_del(&waiter.list);
-	result = -EINTR;
- woken:
-	/*
-	 * Account for the process which woke us up. For the case where
-	 * we're interrupted, we need to increment the count on our own
-	 * behalf. I don't believe we can hit the case where the
-	 * sem->count hits zero, *and* there's a second task sleeping,
-	 * but it doesn't hurt, that's not a commonly exercised path and
-	 * it's not a performance path either.
-	 */
-	if (unlikely((++sem->count >= 0) && !list_empty(&sem->wait_list)))
-		__up_down_common(sem);
-	return result;
+	return -EINTR;
 }
 
 static noinline void __sched __down(struct semaphore *sem)
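Because __down_common() is inlined into the thin wrappers named in the hunk headers, the constant 'state' and 'timeout' arguments fold away at compile time. Roughly, and reconstructed only for context (these wrappers are not shown in this patch), they look like:

/* Reconstructed for context -- not part of this diff. */
static noinline void __sched __down(struct semaphore *sem)
{
	__down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_interruptible(struct semaphore *sem)
{
	return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_timeout(struct semaphore *sem, long jiffies)
{
	return __down_common(sem, TASK_UNINTERRUPTIBLE, jiffies);
}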
@@ -230,8 +199,9 @@ static noinline int __sched __down_timeout(struct semaphore *sem, long jiffies)
 
 static noinline void __sched __up(struct semaphore *sem)
 {
-	if (unlikely(list_empty(&sem->wait_list)))
-		sem->count++;
-	else
-		__up_down_common(sem);
+	struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
+						struct semaphore_waiter, list);
+	list_del(&waiter->list);
+	waiter->up = 1;
+	wake_up_process(waiter->task);
 }
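Pieced together from the added lines above, the new __up() is simply the old __up_down_common() body: up() has already checked that wait_list is non-empty, so __up() hands the release directly to the first waiter instead of ever incrementing ->count:

static noinline void __sched __up(struct semaphore *sem)
{
	struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
						struct semaphore_waiter, list);
	list_del(&waiter->list);
	waiter->up = 1;		/* tells __down_common() the semaphore is ours */
	wake_up_process(waiter->task);
}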