aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorWaiman Long <Waiman.Long@hp.com>2014-08-06 13:22:01 -0400
committerIngo Molnar <mingo@kernel.org>2014-08-13 04:33:34 -0400
commitf0bab73cb539fb803c4d419951e8d28aa4964f8f (patch)
tree0b9616befbd8a892eae3755fb389e2c4296e3d9d
parent4999201a59ef555f9105d2bb2459ed895627f7aa (diff)
locking/lockdep: Restrict the use of recursive read_lock() with qrwlock
Unlike the original unfair rwlock implementation, queued rwlock will grant lock according to the chronological sequence of the lock requests except when the lock requester is in the interrupt context. Consequently, recursive read_lock calls will now hang the process if there is a write_lock call somewhere in between the read_lock calls. This patch updates the lockdep implementation to look for recursive read_lock calls. A new read state (3) is used to mark those read_lock calls that cannot be recursively called except in the interrupt context. The new read state does exhaust the 2 bits available in the held_lock:read bit field. The addition of any new read state in the future may require a redesign of how all those bits are squeezed together in the held_lock structure. Signed-off-by: Waiman Long <Waiman.Long@hp.com> Signed-off-by: Peter Zijlstra <peterz@infradead.org> Cc: Maarten Lankhorst <maarten.lankhorst@canonical.com> Cc: Rik van Riel <riel@redhat.com> Cc: Scott J Norton <scott.norton@hp.com> Cc: Fengguang Wu <fengguang.wu@intel.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Link: http://lkml.kernel.org/r/1407345722-61615-2-git-send-email-Waiman.Long@hp.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--include/linux/lockdep.h10
-rw-r--r--kernel/locking/lockdep.c6
2 files changed, 15 insertions(+), 1 deletion(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index f388481201cd..b5a84b62fb84 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -478,16 +478,24 @@ static inline void print_irqtrace_events(struct task_struct *curr)
  * on the per lock-class debug mode:
  */
 
+/*
+ * Read states in the 2-bit held_lock:read field:
+ * 0: Exclusive lock
+ * 1: Shareable lock, cannot be recursively called
+ * 2: Shareable lock, can be recursively called
+ * 3: Shareable lock, cannot be recursively called except in interrupt context
+ */
 #define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
 #define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
 #define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)
+#define lock_acquire_shared_irecursive(l, s, t, n, i)	lock_acquire(l, s, t, 3, 1, n, i)
 
 #define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
 #define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
 #define spin_release(l, n, i)			lock_release(l, n, i)
 
 #define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
-#define rwlock_acquire_read(l, s, t, i)		lock_acquire_shared_recursive(l, s, t, NULL, i)
+#define rwlock_acquire_read(l, s, t, i)		lock_acquire_shared_irecursive(l, s, t, NULL, i)
 #define rwlock_release(l, n, i)			lock_release(l, n, i)
 
 #define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 88d0d4420ad2..420ba685c4e5 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3597,6 +3597,12 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	raw_local_irq_save(flags);
 	check_flags(flags);
 
+	/*
+	 * An interrupt recursive read in interrupt context can be considered
+	 * to be the same as a recursive read from checking perspective.
+	 */
+	if ((read == 3) && in_interrupt())
+		read = 2;
 	current->lockdep_recursion = 1;
 	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
 	__lock_acquire(lock, subclass, trylock, read, check,