author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2008-08-11 03:30:21 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-08-11 03:30:21 -0400
commit		64aa348edc617dea17bbd01ddee4e47886d5ec8c (patch)
tree		002b5fa796aff225d0cb9d0c32bb3ba96da6eaaf /kernel/lockdep.c
parent		5e710e37bde120bb069f691bee68e69ef4393173 (diff)
lockdep: lock_set_subclass - reset a held lock's subclass
This can be used to reset a held lock's subclass, for iteration over
arbitrary-depth data structures such as trees or lists that have
per-node locks.
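
As an illustration (not part of this patch), here is a minimal sketch of
the hand-over-hand pattern this enables. struct node and walk_nodes()
are invented names for the sketch; only the locking and lockdep calls
are the real API, and it assumes CONFIG_DEBUG_LOCK_ALLOC so that
spinlock_t carries a dep_map:

	#include <linux/kernel.h>
	#include <linux/spinlock.h>
	#include <linux/lockdep.h>

	struct node {
		spinlock_t	 lock;
		struct node	*next;
	};

	static void walk_nodes(struct node *head)
	{
		struct node *cur = head, *next;

		spin_lock(&cur->lock);
		while ((next = cur->next) != NULL) {
			/* Take the next node's lock nested inside the one we hold. */
			spin_lock_nested(&next->lock, SINGLE_DEPTH_NESTING);
			spin_unlock(&cur->lock);
			/*
			 * next->lock is now the only lock held; reset its
			 * subclass to 0 so the following iteration can nest
			 * under it again.
			 */
			lock_set_subclass(&next->lock.dep_map, 0, _THIS_IP_);
			cur = next;
		}
		spin_unlock(&cur->lock);
	}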
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/lockdep.c')
-rw-r--r--	kernel/lockdep.c	69
1 files changed, 69 insertions, 0 deletions
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 6999e64fc248..e14d383dcb0b 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2660,6 +2660,55 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
 	return 1;
 }
 
+static int
+__lock_set_subclass(struct lockdep_map *lock,
+		    unsigned int subclass, unsigned long ip)
+{
+	struct task_struct *curr = current;
+	struct held_lock *hlock, *prev_hlock;
+	struct lock_class *class;
+	unsigned int depth;
+	int i;
+
+	depth = curr->lockdep_depth;
+	if (DEBUG_LOCKS_WARN_ON(!depth))
+		return 0;
+
+	prev_hlock = NULL;
+	for (i = depth-1; i >= 0; i--) {
+		hlock = curr->held_locks + i;
+		/*
+		 * We must not cross into another context:
+		 */
+		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
+			break;
+		if (hlock->instance == lock)
+			goto found_it;
+		prev_hlock = hlock;
+	}
+	return print_unlock_inbalance_bug(curr, lock, ip);
+
+found_it:
+	class = register_lock_class(lock, subclass, 0);
+	hlock->class = class;
+
+	curr->lockdep_depth = i;
+	curr->curr_chain_key = hlock->prev_chain_key;
+
+	for (; i < depth; i++) {
+		hlock = curr->held_locks + i;
+		if (!__lock_acquire(hlock->instance,
+				hlock->class->subclass, hlock->trylock,
+				hlock->read, hlock->check, hlock->hardirqs_off,
+				hlock->acquire_ip))
+			return 0;
+	}
+
+	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
+		return 0;
+	return 1;
+}
+
 /*
  * Remove the lock to the list of currently held locks in a
  * potentially non-nested (out of order) manner. This is a
@@ -2824,6 +2873,26 @@ static void check_flags(unsigned long flags)
 #endif
 }
 
+void
+lock_set_subclass(struct lockdep_map *lock,
+		  unsigned int subclass, unsigned long ip)
+{
+	unsigned long flags;
+
+	if (unlikely(current->lockdep_recursion))
+		return;
+
+	raw_local_irq_save(flags);
+	current->lockdep_recursion = 1;
+	check_flags(flags);
+	if (__lock_set_subclass(lock, subclass, ip))
+		check_chain_key(current);
+	current->lockdep_recursion = 0;
+	raw_local_irq_restore(flags);
+}
+
+EXPORT_SYMBOL_GPL(lock_set_subclass);
+
 /*
  * We are not always called with irqs disabled - do that here,
  * and also avoid lockdep recursion:
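
Another shape this API supports, again a sketch rather than code taken
from this series: take two per-object locks in a fixed (address) order,
drop one, and use lock_set_subclass() to tell lockdep that the surviving
lock is no longer held with the nested subclass. struct obj, lock_both()
and unlock_second() are invented names for the sketch:

	#include <linux/kernel.h>
	#include <linux/spinlock.h>
	#include <linux/lockdep.h>

	struct obj {
		spinlock_t lock;
	};

	/* Acquire both locks, ordered by address to avoid ABBA deadlocks. */
	static void lock_both(struct obj *a, struct obj *b)
	{
		if (a < b) {
			spin_lock(&a->lock);
			spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
		} else {
			spin_lock(&b->lock);
			spin_lock_nested(&a->lock, SINGLE_DEPTH_NESTING);
		}
	}

	/* Drop b; a stays held, possibly acquired above with the nested subclass. */
	static void unlock_second(struct obj *a, struct obj *b)
	{
		spin_unlock(&b->lock);
		/* Reset a's subclass to 0 now that it is held on its own. */
		lock_set_subclass(&a->lock.dep_map, 0, _THIS_IP_);
	}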