-rw-r--r--   include/linux/lockdep.h   13
-rw-r--r--   kernel/lockdep.c          25
2 files changed, 30 insertions, 8 deletions
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 06aed8305bf3..2186a64ee4b5 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -32,6 +32,17 @@ extern int lock_stat;
 #define MAX_LOCKDEP_SUBCLASSES         8UL
 
 /*
+ * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
+ * cached in the instance of lockdep_map
+ *
+ * Currently the main class (subclass == 0) and the single-depth
+ * subclass are cached in lockdep_map. This optimization mainly
+ * targets rq->lock: double_rq_lock() acquires this highly contended
+ * lock with a single-depth subclass.
+ */
+#define NR_LOCKDEP_CACHING_CLASSES     2
+
+/*
  * Lock-classes are keyed via unique addresses, by embedding the
  * lockclass-key into the kernel (or module) .data section. (For
  * static locks we use the lock address itself as the key.)
@@ -138,7 +149,7 @@ void clear_lock_stats(struct lock_class *class);
  */
 struct lockdep_map {
        struct lock_class_key           *key;
-       struct lock_class               *class_cache;
+       struct lock_class               *class_cache[NR_LOCKDEP_CACHING_CLASSES];
        const char                      *name;
 #ifdef CONFIG_LOCK_STAT
        int                             cpu;
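The rq->lock case called out in the new comment comes from the scheduler's double_rq_lock(), which takes the two runqueue locks in address order and annotates the inner one with SINGLE_DEPTH_NESTING (subclass 1), so both cache slots of rq->lock are hot. A paraphrased sketch of that caller from the scheduler code of this era (sparse __acquires() annotations omitted):

static void double_rq_lock(struct rq *rq1, struct rq *rq2)
{
        BUG_ON(rq1 == rq2);
        if (rq1 < rq2) {
                raw_spin_lock(&rq1->lock);
                /* subclass 1: served from class_cache[1] after this patch */
                raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
        } else {
                raw_spin_lock(&rq2->lock);
                raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
        }
}

Before this patch only subclass 0 was cached, so every raw_spin_lock_nested() above fell through to the hash-table lookup in register_lock_class().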
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 84baa71cfda5..bc4d32871f9a 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -774,7 +774,9 @@ out_unlock_set:
        raw_local_irq_restore(flags);
 
        if (!subclass || force)
-               lock->class_cache = class;
+               lock->class_cache[0] = class;
+       else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
+               lock->class_cache[subclass] = class;
 
        if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
                return NULL;
@@ -2679,7 +2681,11 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 void lockdep_init_map(struct lockdep_map *lock, const char *name,
                      struct lock_class_key *key, int subclass)
 {
-       lock->class_cache = NULL;
+       int i;
+
+       for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
+               lock->class_cache[i] = NULL;
+
 #ifdef CONFIG_LOCK_STAT
        lock->cpu = raw_smp_processor_id();
 #endif
@@ -2750,10 +2756,10 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
        if (lock->key == &__lockdep_no_validate__)
                check = 1;
 
-       if (!subclass)
-               class = lock->class_cache;
+       if (subclass < NR_LOCKDEP_CACHING_CLASSES)
+               class = lock->class_cache[subclass];
        /*
-        * Not cached yet or subclass?
+        * Not cached?
         */
        if (unlikely(!class)) {
                class = register_lock_class(lock, subclass, 0);
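The fast path thus generalizes from "cache only subclass 0" to "cache any subclass below NR_LOCKDEP_CACHING_CLASSES", and a miss is now simply a NULL slot, which is why the "or subclass?" part of the comment goes away. A self-contained model of the lookup policy, using a hypothetical stand-in type rather than the real struct lockdep_map:

#define NR_LOCKDEP_CACHING_CLASSES     2

struct lock_class;     /* opaque for this sketch */

struct lockdep_map_model {
        struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES];
};

/*
 * Mirrors the __lock_acquire() fast path after this patch: subclasses
 * 0 and 1 may hit the cache; anything deeper always takes the slow
 * hash-table lookup in register_lock_class().
 */
static struct lock_class *cached_class(struct lockdep_map_model *map,
                                       unsigned int subclass)
{
        if (subclass < NR_LOCKDEP_CACHING_CLASSES)
                return map->class_cache[subclass];
        return NULL;    /* caller falls back to the slow path */
}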
@@ -2918,7 +2924,7 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
                return 1;
 
        if (hlock->references) {
-               struct lock_class *class = lock->class_cache;
+               struct lock_class *class = lock->class_cache[0];
 
                if (!class)
                        class = look_up_lock_class(lock, 0);
@@ -3559,7 +3565,12 @@ void lockdep_reset_lock(struct lockdep_map *lock)
                if (list_empty(head))
                        continue;
                list_for_each_entry_safe(class, next, head, hash_entry) {
-                       if (unlikely(class == lock->class_cache)) {
+                       int match = 0;
+
+                       for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
+                               match |= class == lock->class_cache[j];
+
+                       if (unlikely(match)) {
                                if (debug_locks_off_graph_unlock())
                                        WARN_ON(1);
                                goto out_restore;
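Since a class can now sit in any cache slot, lockdep_reset_lock() has to scan the whole array before zapping a class, not just the old single pointer. The new loop is equivalent to this small helper (hypothetical name, same stand-in type as the sketch above):

/* Does @map still cache @class in any slot? */
static int map_caches_class(struct lockdep_map_model *map,
                            struct lock_class *class)
{
        int j, match = 0;

        for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
                match |= class == map->class_cache[j];
        return match;
}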