diff options
author:    Peter Zijlstra <a.p.zijlstra@chello.nl>  2008-08-11 03:30:24 -0400
committer: Ingo Molnar <mingo@elte.hu>              2008-08-11 03:30:24 -0400
commit:    7531e2f34d1d551b096143f19111139f0dd84c8b (patch)
tree:      0a29d6703e28dc6752b9b4085594cca238595aac /include/linux
parent:    4f3e7524b2e703d9f8b02ac338153a53dd7ede66 (diff)
lockdep: lock protection locks
On Fri, 2008-08-01 at 16:26 -0700, Linus Torvalds wrote:
> On Fri, 1 Aug 2008, David Miller wrote:
> >
> > Taking more than a few locks of the same class at once is bad
> > news and it's better to find an alternative method.
>
> It's not always wrong.
>
> If you can guarantee that anybody that takes more than one lock of a
> particular class will always take a single top-level lock _first_, then
> that's all good. You can obviously screw up and take the same lock _twice_
> (which will deadlock), but at least you cannot get into ABBA situations.
>
> So maybe the right thing to do is to just teach lockdep about "lock
> protection locks". That would have solved the multi-queue issues for
> networking too - all the actual network drivers would still have taken
> just their single queue lock, but the one case that needs to take all of
> them would have taken a separate top-level lock first.
>
> Never mind that the multi-queue locks were always taken in the same order:
> it's never wrong to just have some top-level serialization, and anybody
> who needs to take <n> locks might as well do <n+1>, because they sure as
> hell aren't going to be on _any_ fastpaths.
>
> So the simplest solution really sounds like just teaching lockdep about
> that one special case. It's not "nesting" exactly, although it's obviously
> related to it.
Do as Linus suggested. The lock protection lock is called nest_lock.
Note that we still have the MAX_LOCK_DEPTH (48) limit to consider, so anything
that spills past that is still up shit creek.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux')
 -rw-r--r--  include/linux/lockdep.h     34
 -rw-r--r--  include/linux/rcuclassic.h   2
 2 files changed, 19 insertions(+), 17 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index e431d1d6eaf3..93a8cc02a033 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -211,6 +211,7 @@ struct held_lock {
 	u64				prev_chain_key;
 	unsigned long			acquire_ip;
 	struct lockdep_map		*instance;
+	struct lockdep_map		*nest_lock;
 #ifdef CONFIG_LOCK_STAT
 	u64				waittime_stamp;
 	u64				holdtime_stamp;
@@ -297,7 +298,8 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
  * 2: full validation
  */
 extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
-			 int trylock, int read, int check, unsigned long ip);
+			 int trylock, int read, int check,
+			 struct lockdep_map *nest_lock, unsigned long ip);
 
 extern void lock_release(struct lockdep_map *lock, int nested,
 			 unsigned long ip);
@@ -319,7 +321,7 @@ static inline void lockdep_on(void)
 {
 }
 
-# define lock_acquire(l, s, t, r, c, i)		do { } while (0)
+# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
 # define lock_release(l, n, i)			do { } while (0)
 # define lock_set_subclass(l, s, i)		do { } while (0)
 # define lockdep_init()				do { } while (0)
@@ -407,9 +409,9 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # ifdef CONFIG_PROVE_LOCKING
-#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
+#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
 # else
-#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
+#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
 # endif
 # define spin_release(l, n, i)			lock_release(l, n, i)
 #else
@@ -419,11 +421,11 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # ifdef CONFIG_PROVE_LOCKING
-#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
-#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, i)
+#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
+#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, NULL, i)
 # else
-#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
-#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, i)
+#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
+#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, NULL, i)
 # endif
 # define rwlock_release(l, n, i)		lock_release(l, n, i)
 #else
@@ -434,9 +436,9 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # ifdef CONFIG_PROVE_LOCKING
-#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
+#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
 # else
-#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
+#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
 # endif
 # define mutex_release(l, n, i)			lock_release(l, n, i)
 #else
@@ -446,11 +448,11 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # ifdef CONFIG_PROVE_LOCKING
-#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
-#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 2, i)
+#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
+#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 2, NULL, i)
 # else
-#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
-#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 1, i)
+#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
+#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 1, NULL, i)
 # endif
 # define rwsem_release(l, n, i)			lock_release(l, n, i)
 #else
@@ -461,9 +463,9 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # ifdef CONFIG_PROVE_LOCKING
-#  define map_acquire(l)		lock_acquire(l, 0, 0, 0, 2, _THIS_IP_)
+#  define map_acquire(l)		lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
 # else
-#  define map_acquire(l)		lock_acquire(l, 0, 0, 0, 1, _THIS_IP_)
+#  define map_acquire(l)		lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
 # endif
 # define map_release(l)			lock_release(l, 1, _THIS_IP_)
 #else
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
index 8c774905dcfe..4ab843622727 100644
--- a/include/linux/rcuclassic.h
+++ b/include/linux/rcuclassic.h
@@ -117,7 +117,7 @@ extern int rcu_needs_cpu(int cpu);
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 extern struct lockdep_map rcu_lock_map;
 # define rcu_read_acquire()	\
-			lock_acquire(&rcu_lock_map, 0, 0, 2, 1, _THIS_IP_)
+			lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
 # define rcu_read_release()	lock_release(&rcu_lock_map, 1, _THIS_IP_)
 #else
 # define rcu_read_acquire()	do { } while (0)
