author | Andrew Morton <akpm@linux-foundation.org> | 2016-02-11 19:13:14 -0500
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-02-11 21:35:48 -0500
commit | 4a389810bc3cb0e73443104f0827e81e23cb1e12 (patch)
tree | 00c0ed0ee923222bbf68bc23ed88450120745428 /kernel/locking
parent | 6b75d14912f2d89a3539c0b3a100519e1eec9a63 (diff)
kernel/locking/lockdep.c: convert hash tables to hlists
Mike said:
: CONFIG_UBSAN_ALIGNMENT breaks the x86-64 kernel with lockdep enabled, i.e.
: a kernel with CONFIG_UBSAN_ALIGNMENT fails to load without even an error
: message.
:
: The problem is that the ubsan callbacks use spinlocks and might be called
: before lockdep is initialized. In particular, this line in the
: reserve_ebda_region function causes the problem:
:
: lowmem = *(unsigned short *)__va(BIOS_LOWMEM_KILOBYTES);
:
: If I put lockdep_init() before the reserve_ebda_region call in
: x86_64_start_reservations, the kernel loads fine.
Fix this ordering issue permanently: change lockdep so that it uses
hlists for the hash tables. Unlike a list_head, an hlist_head is in its
initialized state when it is all-zeroes, so lockdep is ready for
operation immediately upon boot - lockdep_init() need not have run.
The patch also saves some memory: an hlist_head is a single pointer, half the size of a list_head.
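To see why the conversion removes the init-ordering hazard, here is a minimal stand-alone sketch. The struct layouts mirror the kernel's <linux/list.h>, but the helpers and the main() harness are illustrative userspace code, not the kernel's:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };
struct hlist_node { struct hlist_node *next, **pprev; };
struct hlist_head { struct hlist_node *first; };

/* Static (BSS) objects start out all-zeroes, like lockdep's hash tables. */
static struct list_head lh;   /* zeroed: next == prev == NULL */
static struct hlist_head hh;  /* zeroed: first == NULL        */

/* An empty list_head must point at itself; all-zeroes is NOT empty. */
static int list_is_empty(const struct list_head *head)
{
	return head->next == head;
}

/* An empty hlist_head is simply a NULL first pointer. */
static int hlist_is_empty(const struct hlist_head *head)
{
	return !head->first;
}

int main(void)
{
	/* Walking the zeroed list_head would dereference NULL; it needs
	 * INIT_LIST_HEAD() first -- hence lockdep_init() in the old code. */
	printf("zeroed list_head empty?  %d (wrong until initialized)\n",
	       list_is_empty(&lh));

	/* The zeroed hlist_head is already a valid empty bucket, so the
	 * hash tables are usable from the very start of boot. */
	printf("zeroed hlist_head empty? %d\n", hlist_is_empty(&hh));

	/* The memory saving: one pointer per bucket instead of two. */
	printf("sizeof(list_head)=%zu sizeof(hlist_head)=%zu\n",
	       sizeof(struct list_head), sizeof(struct hlist_head));
	return 0;
}

Note also that the patch swaps list_add_tail_rcu() for hlist_add_head_rcu(): an hlist only supports cheap insertion at the head, which is fine for a hash bucket where entry order does not matter.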
lockdep_init() and lockdep_initialized can be done away with now - a 4.6
patch has been prepared to do this.
Reported-by: Mike Krinkin <krinkin.m.u@gmail.com>
Suggested-by: Mike Krinkin <krinkin.m.u@gmail.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel/locking')
-rw-r--r-- | kernel/locking/lockdep.c | 42
1 file changed, 19 insertions(+), 23 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 60ace56618f6..7537e568dabe 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -292,7 +292,7 @@ LIST_HEAD(all_lock_classes);
 #define __classhashfn(key)	hash_long((unsigned long)key, CLASSHASH_BITS)
 #define classhashentry(key)	(classhash_table + __classhashfn((key)))
 
-static struct list_head classhash_table[CLASSHASH_SIZE];
+static struct hlist_head classhash_table[CLASSHASH_SIZE];
 
 /*
  * We put the lock dependency chains into a hash-table as well, to cache
@@ -303,7 +303,7 @@ static struct list_head classhash_table[CLASSHASH_SIZE];
 #define __chainhashfn(chain)	hash_long(chain, CHAINHASH_BITS)
 #define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain)))
 
-static struct list_head chainhash_table[CHAINHASH_SIZE];
+static struct hlist_head chainhash_table[CHAINHASH_SIZE];
 
 /*
  * The hash key of the lock dependency chains is a hash itself too:
@@ -666,7 +666,7 @@ static inline struct lock_class *
 look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 {
 	struct lockdep_subclass_key *key;
-	struct list_head *hash_head;
+	struct hlist_head *hash_head;
 	struct lock_class *class;
 
 #ifdef CONFIG_DEBUG_LOCKDEP
@@ -719,7 +719,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return NULL;
 
-	list_for_each_entry_rcu(class, hash_head, hash_entry) {
+	hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
 		if (class->key == key) {
 			/*
 			 * Huh! same key, different name? Did someone trample
@@ -742,7 +742,7 @@ static inline struct lock_class *
 register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 {
 	struct lockdep_subclass_key *key;
-	struct list_head *hash_head;
+	struct hlist_head *hash_head;
 	struct lock_class *class;
 
 	DEBUG_LOCKS_WARN_ON(!irqs_disabled());
@@ -774,7 +774,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	 * We have to do the hash-walk again, to avoid races
 	 * with another CPU:
 	 */
-	list_for_each_entry_rcu(class, hash_head, hash_entry) {
+	hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
 		if (class->key == key)
 			goto out_unlock_set;
 	}
@@ -805,7 +805,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	 * We use RCU's safe list-add method to make
 	 * parallel walking of the hash-list safe:
 	 */
-	list_add_tail_rcu(&class->hash_entry, hash_head);
+	hlist_add_head_rcu(&class->hash_entry, hash_head);
 	/*
 	 * Add it to the global list of classes:
 	 */
@@ -2017,7 +2017,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
 				     u64 chain_key)
 {
 	struct lock_class *class = hlock_class(hlock);
-	struct list_head *hash_head = chainhashentry(chain_key);
+	struct hlist_head *hash_head = chainhashentry(chain_key);
 	struct lock_chain *chain;
 	struct held_lock *hlock_curr;
 	int i, j;
@@ -2033,7 +2033,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
 	 * We can walk it lock-free, because entries only get added
 	 * to the hash:
 	 */
-	list_for_each_entry_rcu(chain, hash_head, entry) {
+	hlist_for_each_entry_rcu(chain, hash_head, entry) {
 		if (chain->chain_key == chain_key) {
 cache_hit:
 			debug_atomic_inc(chain_lookup_hits);
@@ -2057,7 +2057,7 @@ cache_hit:
 	/*
 	 * We have to walk the chain again locked - to avoid duplicates:
 	 */
-	list_for_each_entry(chain, hash_head, entry) {
+	hlist_for_each_entry(chain, hash_head, entry) {
 		if (chain->chain_key == chain_key) {
 			graph_unlock();
 			goto cache_hit;
@@ -2091,7 +2091,7 @@ cache_hit:
 		}
 		chain_hlocks[chain->base + j] = class - lock_classes;
 	}
-	list_add_tail_rcu(&chain->entry, hash_head);
+	hlist_add_head_rcu(&chain->entry, hash_head);
 	debug_atomic_inc(chain_lookup_misses);
 	inc_chains();
 
@@ -3875,7 +3875,7 @@ void lockdep_reset(void)
 	nr_process_chains = 0;
 	debug_locks = 1;
 	for (i = 0; i < CHAINHASH_SIZE; i++)
-		INIT_LIST_HEAD(chainhash_table + i);
+		INIT_HLIST_HEAD(chainhash_table + i);
 	raw_local_irq_restore(flags);
 }
 
@@ -3894,7 +3894,7 @@ static void zap_class(struct lock_class *class)
 	/*
 	 * Unhash the class and remove it from the all_lock_classes list:
 	 */
-	list_del_rcu(&class->hash_entry);
+	hlist_del_rcu(&class->hash_entry);
 	list_del_rcu(&class->lock_entry);
 
 	RCU_INIT_POINTER(class->key, NULL);
@@ -3917,7 +3917,7 @@ static inline int within(const void *addr, void *start, unsigned long size)
 void lockdep_free_key_range(void *start, unsigned long size)
 {
 	struct lock_class *class;
-	struct list_head *head;
+	struct hlist_head *head;
 	unsigned long flags;
 	int i;
 	int locked;
@@ -3930,9 +3930,7 @@ void lockdep_free_key_range(void *start, unsigned long size)
 	 */
 	for (i = 0; i < CLASSHASH_SIZE; i++) {
 		head = classhash_table + i;
-		if (list_empty(head))
-			continue;
-		list_for_each_entry_rcu(class, head, hash_entry) {
+		hlist_for_each_entry_rcu(class, head, hash_entry) {
 			if (within(class->key, start, size))
 				zap_class(class);
 			else if (within(class->name, start, size))
@@ -3962,7 +3960,7 @@ void lockdep_free_key_range(void *start, unsigned long size)
 void lockdep_reset_lock(struct lockdep_map *lock)
 {
 	struct lock_class *class;
-	struct list_head *head;
+	struct hlist_head *head;
 	unsigned long flags;
 	int i, j;
 	int locked;
@@ -3987,9 +3985,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
 	locked = graph_lock();
 	for (i = 0; i < CLASSHASH_SIZE; i++) {
 		head = classhash_table + i;
-		if (list_empty(head))
-			continue;
-		list_for_each_entry_rcu(class, head, hash_entry) {
+		hlist_for_each_entry_rcu(class, head, hash_entry) {
 			int match = 0;
 
 			for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
@@ -4027,10 +4023,10 @@ void lockdep_init(void)
 		return;
 
 	for (i = 0; i < CLASSHASH_SIZE; i++)
-		INIT_LIST_HEAD(classhash_table + i);
+		INIT_HLIST_HEAD(classhash_table + i);
 
 	for (i = 0; i < CHAINHASH_SIZE; i++)
-		INIT_LIST_HEAD(chainhash_table + i);
+		INIT_HLIST_HEAD(chainhash_table + i);
 
 	lockdep_initialized = 1;
 }