Diffstat (limited to 'kernel')

 kernel/lockdep.c | 25 ++++++++++++++++++-------
 1 file changed, 18 insertions(+), 7 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 84baa71cfda5..bc4d32871f9a 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -774,7 +774,9 @@ out_unlock_set:
 	raw_local_irq_restore(flags);
 
 	if (!subclass || force)
-		lock->class_cache = class;
+		lock->class_cache[0] = class;
+	else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
+		lock->class_cache[subclass] = class;
 
 	if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
 		return NULL;
@@ -2679,7 +2681,11 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 void lockdep_init_map(struct lockdep_map *lock, const char *name,
 		      struct lock_class_key *key, int subclass)
 {
-	lock->class_cache = NULL;
+	int i;
+
+	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
+		lock->class_cache[i] = NULL;
+
 #ifdef CONFIG_LOCK_STAT
 	lock->cpu = raw_smp_processor_id();
 #endif
@@ -2750,10 +2756,10 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	if (lock->key == &__lockdep_no_validate__)
 		check = 1;
 
-	if (!subclass)
-		class = lock->class_cache;
+	if (subclass < NR_LOCKDEP_CACHING_CLASSES)
+		class = lock->class_cache[subclass];
 	/*
-	 * Not cached yet or subclass?
+	 * Not cached?
 	 */
 	if (unlikely(!class)) {
 		class = register_lock_class(lock, subclass, 0);
@@ -2918,7 +2924,7 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
 		return 1;
 
 	if (hlock->references) {
-		struct lock_class *class = lock->class_cache;
+		struct lock_class *class = lock->class_cache[0];
 
 		if (!class)
 			class = look_up_lock_class(lock, 0);
@@ -3559,7 +3565,12 @@ void lockdep_reset_lock(struct lockdep_map *lock)
 		if (list_empty(head))
 			continue;
 		list_for_each_entry_safe(class, next, head, hash_entry) {
-			if (unlikely(class == lock->class_cache)) {
+			int match = 0;
+
+			for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
+				match |= class == lock->class_cache[j];
+
+			if (unlikely(match)) {
 				if (debug_locks_off_graph_unlock())
 					WARN_ON(1);
 				goto out_restore;
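
The idea behind the patch is that class_cache becomes a small array indexed by lock subclass, so repeated acquisitions of subclasses below NR_LOCKDEP_CACHING_CLASSES can skip the hash-table lookup, while higher subclasses still take the slow registration path. The standalone C sketch below mirrors only that fast-path/slow-path pattern; the struct layout, the lookup_slow() helper, and the cache size here are simplified stand-ins for illustration, not the kernel's actual definitions.

/*
 * Sketch (not kernel code) of the per-subclass caching pattern:
 * one cached class pointer per subclass up to a small limit,
 * falling back to a slow lookup beyond it.
 */
#include <stdio.h>
#include <stddef.h>

#define NR_CACHING_CLASSES 2	/* plays the role of NR_LOCKDEP_CACHING_CLASSES */

struct lock_class { int id; };

struct map {
	struct lock_class *class_cache[NR_CACHING_CLASSES];
};

/* stand-in for the real register_lock_class()/look_up_lock_class() */
static struct lock_class classes[8];

static struct lock_class *lookup_slow(unsigned int subclass)
{
	classes[subclass].id = (int)subclass;
	return &classes[subclass];
}

static struct lock_class *get_class(struct map *m, unsigned int subclass)
{
	struct lock_class *class = NULL;

	/* fast path: per-subclass cache, as in __lock_acquire() after the patch */
	if (subclass < NR_CACHING_CLASSES)
		class = m->class_cache[subclass];

	if (!class) {
		class = lookup_slow(subclass);
		if (subclass < NR_CACHING_CLASSES)
			m->class_cache[subclass] = class;	/* populate the cache */
	}
	return class;
}

int main(void)
{
	struct map m = { { NULL } };

	printf("subclass 0 -> id %d\n", get_class(&m, 0)->id);
	printf("subclass 1 -> id %d (now cached)\n", get_class(&m, 1)->id);
	printf("subclass 5 -> id %d (beyond cache, slow path every time)\n",
	       get_class(&m, 5)->id);
	return 0;
}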
