diff options
author | David S. Miller <davem@davemloft.net> | 2016-06-30 05:03:36 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2016-06-30 05:03:36 -0400 |
commit | ee58b57100ca953da7320c285315a95db2f7053d (patch) | |
tree | 77b815a31240adc4d6326346908137fc6c2c3a96 /kernel/locking/mutex.c | |
parent | 6f30e8b022c8e3a722928ddb1a2ae0be852fcc0e (diff) | |
parent | e7bdea7750eb2a64aea4a08fa5c0a31719c8155d (diff) |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Several cases of overlapping changes, except the packet scheduler
conflicts which deal with the addition of the free list parameter
to qdisc_enqueue().
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'kernel/locking/mutex.c')
-rw-r--r-- | kernel/locking/mutex.c | 15 |
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c index e364b424b019..a70b90db3909 100644 --- a/kernel/locking/mutex.c +++ b/kernel/locking/mutex.c | |||
@@ -486,9 +486,6 @@ __ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx) | |||
486 | if (!hold_ctx) | 486 | if (!hold_ctx) |
487 | return 0; | 487 | return 0; |
488 | 488 | ||
489 | if (unlikely(ctx == hold_ctx)) | ||
490 | return -EALREADY; | ||
491 | |||
492 | if (ctx->stamp - hold_ctx->stamp <= LONG_MAX && | 489 | if (ctx->stamp - hold_ctx->stamp <= LONG_MAX && |
493 | (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) { | 490 | (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) { |
494 | #ifdef CONFIG_DEBUG_MUTEXES | 491 | #ifdef CONFIG_DEBUG_MUTEXES |
@@ -514,6 +511,12 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, | |||
514 | unsigned long flags; | 511 | unsigned long flags; |
515 | int ret; | 512 | int ret; |
516 | 513 | ||
514 | if (use_ww_ctx) { | ||
515 | struct ww_mutex *ww = container_of(lock, struct ww_mutex, base); | ||
516 | if (unlikely(ww_ctx == READ_ONCE(ww->ctx))) | ||
517 | return -EALREADY; | ||
518 | } | ||
519 | |||
517 | preempt_disable(); | 520 | preempt_disable(); |
518 | mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip); | 521 | mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip); |
519 | 522 | ||
@@ -534,7 +537,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, | |||
534 | goto skip_wait; | 537 | goto skip_wait; |
535 | 538 | ||
536 | debug_mutex_lock_common(lock, &waiter); | 539 | debug_mutex_lock_common(lock, &waiter); |
537 | debug_mutex_add_waiter(lock, &waiter, task_thread_info(task)); | 540 | debug_mutex_add_waiter(lock, &waiter, task); |
538 | 541 | ||
539 | /* add waiting tasks to the end of the waitqueue (FIFO): */ | 542 | /* add waiting tasks to the end of the waitqueue (FIFO): */ |
540 | list_add_tail(&waiter.list, &lock->wait_list); | 543 | list_add_tail(&waiter.list, &lock->wait_list); |
@@ -581,7 +584,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, | |||
581 | } | 584 | } |
582 | __set_task_state(task, TASK_RUNNING); | 585 | __set_task_state(task, TASK_RUNNING); |
583 | 586 | ||
584 | mutex_remove_waiter(lock, &waiter, current_thread_info()); | 587 | mutex_remove_waiter(lock, &waiter, task); |
585 | /* set it to 0 if there are no waiters left: */ | 588 | /* set it to 0 if there are no waiters left: */ |
586 | if (likely(list_empty(&lock->wait_list))) | 589 | if (likely(list_empty(&lock->wait_list))) |
587 | atomic_set(&lock->count, 0); | 590 | atomic_set(&lock->count, 0); |
@@ -602,7 +605,7 @@ skip_wait: | |||
602 | return 0; | 605 | return 0; |
603 | 606 | ||
604 | err: | 607 | err: |
605 | mutex_remove_waiter(lock, &waiter, task_thread_info(task)); | 608 | mutex_remove_waiter(lock, &waiter, task); |
606 | spin_unlock_mutex(&lock->wait_lock, flags); | 609 | spin_unlock_mutex(&lock->wait_lock, flags); |
607 | debug_mutex_free_waiter(&waiter); | 610 | debug_mutex_free_waiter(&waiter); |
608 | mutex_release(&lock->dep_map, 1, ip); | 611 | mutex_release(&lock->dep_map, 1, ip); |