author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2008-08-11 03:30:21 -0400
committer  Ingo Molnar <mingo@elte.hu>                2008-08-11 03:30:21 -0400
commit     64aa348edc617dea17bbd01ddee4e47886d5ec8c
tree       002b5fa796aff225d0cb9d0c32bb3ba96da6eaaf
parent     5e710e37bde120bb069f691bee68e69ef4393173
lockdep: lock_set_subclass - reset a held lock's subclass
This can be used to reset a held lock's subclass, which is useful for
arbitrary-depth iterated data structures such as trees or lists that
have per-node locks.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
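
As a concrete illustration of the intended use, here is a minimal sketch of a
hand-over-hand (lock coupling) traversal of a list with per-node locks. The
struct node and walk_list() names are hypothetical, invented for this example;
spin_lock_nested(), SINGLE_DEPTH_NESTING and _THIS_IP_ are existing kernel
facilities. Without the lock_set_subclass() reset, every step of the walk
would need a deeper subclass annotation, and lockdep only supports a small
fixed number of subclasses (MAX_LOCKDEP_SUBCLASSES).

#include <linux/spinlock.h>
#include <linux/lockdep.h>
#include <linux/kernel.h>	/* _THIS_IP_ */

struct node {
	spinlock_t	lock;	/* all node locks share one lock class */
	struct node	*next;
};

static void walk_list(struct node *head)
{
	struct node *cur = head, *next;

	spin_lock(&cur->lock);
	while ((next = cur->next)) {
		/*
		 * Second lock of the same class: annotate it as a
		 * nested subclass so lockdep does not flag recursion.
		 */
		spin_lock_nested(&next->lock, SINGLE_DEPTH_NESTING);
		spin_unlock(&cur->lock);
		/*
		 * Only next->lock is held now; reset its subclass to 0
		 * so the next iteration can use SINGLE_DEPTH_NESTING
		 * again, at arbitrary depth.
		 */
		lock_set_subclass(&next->lock.dep_map, 0, _THIS_IP_);
		cur = next;
	}
	spin_unlock(&cur->lock);
}

Note that the .dep_map member of spinlock_t exists only under
CONFIG_DEBUG_LOCK_ALLOC, but since the !LOCKDEP stub added by this patch is a
macro that discards its arguments, the call compiles away to nothing and the
code above builds either way.
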
-rw-r--r--  include/linux/lockdep.h |  4
-rw-r--r--  kernel/lockdep.c        | 69
2 files changed, 73 insertions, 0 deletions
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 1bfdc30bb0af..f270ce1582ff 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -300,6 +300,9 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 extern void lock_release(struct lockdep_map *lock, int nested,
			  unsigned long ip);
 
+extern void lock_set_subclass(struct lockdep_map *lock, unsigned int subclass,
+			      unsigned long ip);
+
 # define INIT_LOCKDEP				.lockdep_recursion = 0,
 
 #define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)
@@ -316,6 +319,7 @@ static inline void lockdep_on(void)
 
 # define lock_acquire(l, s, t, r, c, i)		do { } while (0)
 # define lock_release(l, n, i)			do { } while (0)
+# define lock_set_subclass(l, s, i)		do { } while (0)
 # define lockdep_init()				do { } while (0)
 # define lockdep_info()				do { } while (0)
 # define lockdep_init_map(lock, name, key, sub) do { (void)(key); } while (0)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 6999e64fc248..e14d383dcb0b 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2660,6 +2660,55 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
 	return 1;
 }
 
+static int
+__lock_set_subclass(struct lockdep_map *lock,
+		    unsigned int subclass, unsigned long ip)
+{
+	struct task_struct *curr = current;
+	struct held_lock *hlock, *prev_hlock;
+	struct lock_class *class;
+	unsigned int depth;
+	int i;
+
+	depth = curr->lockdep_depth;
+	if (DEBUG_LOCKS_WARN_ON(!depth))
+		return 0;
+
+	prev_hlock = NULL;
+	for (i = depth-1; i >= 0; i--) {
+		hlock = curr->held_locks + i;
+		/*
+		 * We must not cross into another context:
+		 */
+		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
+			break;
+		if (hlock->instance == lock)
+			goto found_it;
+		prev_hlock = hlock;
+	}
+	return print_unlock_inbalance_bug(curr, lock, ip);
+
+found_it:
+	class = register_lock_class(lock, subclass, 0);
+	hlock->class = class;
+
+	curr->lockdep_depth = i;
+	curr->curr_chain_key = hlock->prev_chain_key;
+
+	for (; i < depth; i++) {
+		hlock = curr->held_locks + i;
+		if (!__lock_acquire(hlock->instance,
+				hlock->class->subclass, hlock->trylock,
+				hlock->read, hlock->check, hlock->hardirqs_off,
+				hlock->acquire_ip))
+			return 0;
+	}
+
+	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
+		return 0;
+	return 1;
+}
+
 /*
  * Remove the lock to the list of currently held locks in a
  * potentially non-nested (out of order) manner. This is a
@@ -2824,6 +2873,26 @@ static void check_flags(unsigned long flags)
 #endif
 }
 
+void
+lock_set_subclass(struct lockdep_map *lock,
+		  unsigned int subclass, unsigned long ip)
+{
+	unsigned long flags;
+
+	if (unlikely(current->lockdep_recursion))
+		return;
+
+	raw_local_irq_save(flags);
+	current->lockdep_recursion = 1;
+	check_flags(flags);
+	if (__lock_set_subclass(lock, subclass, ip))
+		check_chain_key(current);
+	current->lockdep_recursion = 0;
+	raw_local_irq_restore(flags);
+}
+
+EXPORT_SYMBOL_GPL(lock_set_subclass);
+
 /*
  * We are not always called with irqs disabled - do that here,
  * and also avoid lockdep recursion: