Diffstat (limited to 'kernel/futex.c')
-rw-r--r--	kernel/futex.c	28
1 file changed, 13 insertions(+), 15 deletions(-)
diff --git a/kernel/futex.c b/kernel/futex.c
index aca8d10704f6..5efa2f978032 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -201,21 +201,6 @@ static int get_futex_key(unsigned long uaddr, union futex_key *key)
 	 * from swap. But that's a lot of code to duplicate here
 	 * for a rare case, so we simply fetch the page.
 	 */
-
-	/*
-	 * Do a quick atomic lookup first - this is the fastpath.
-	 */
-	page = follow_page(mm, uaddr, FOLL_TOUCH|FOLL_GET);
-	if (likely(page != NULL)) {
-		key->shared.pgoff =
-			page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-		put_page(page);
-		return 0;
-	}
-
-	/*
-	 * Do it the general way.
-	 */
 	err = get_user_pages(current, mm, uaddr, 1, 0, 0, &page, NULL);
 	if (err >= 0) {
 		key->shared.pgoff =
@@ -285,7 +270,13 @@ static void wake_futex(struct futex_q *q)
 	/*
 	 * The waiting task can free the futex_q as soon as this is written,
 	 * without taking any locks. This must come last.
+	 *
+	 * A memory barrier is required here to prevent the following store
+	 * to lock_ptr from getting ahead of the wakeup. Clearing the lock
+	 * at the end of wake_up_all() does not prevent this store from
+	 * moving.
 	 */
+	wmb();
 	q->lock_ptr = NULL;
 }
 
@@ -365,6 +356,13 @@ retry:
 	if (bh1 != bh2)
 		spin_unlock(&bh2->lock);
 
+#ifndef CONFIG_MMU
+	/* we don't get EFAULT from MMU faults if we don't have an MMU,
+	 * but we might get them from range checking */
+	ret = op_ret;
+	goto out;
+#endif
+
 	if (unlikely(op_ret != -EFAULT)) {
 		ret = op_ret;
 		goto out;