author     Andrey Ryabinin <aryabinin@virtuozzo.com>    2016-02-04 14:29:36 -0500
committer  Ingo Molnar <mingo@kernel.org>               2016-02-09 06:03:25 -0500
commit     06bea3dbfe6a4c333c4333362c46bdf4d9e43504 (patch)
tree       b06cdf5f34a8b2c5cbbbf6e7535893f46ddc3307 /kernel/locking/lockdep.c
parent     a63f38cc4ccfa076f87fc3d0c276ee62e710f953 (diff)
locking/lockdep: Eliminate lockdep_init()
Lockdep is initialized at compile time now. Get rid of lockdep_init().
Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Krinkin <krinkin.m.u@gmail.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Cc: mm-commits@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/locking/lockdep.c')
-rw-r--r--  kernel/locking/lockdep.c  59
1 file changed, 0 insertions(+), 59 deletions(-)
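Context for the change (an illustrative sketch, not part of the commit or the kernel's exact code): once lockdep's hash tables became statically allocated arrays of single-pointer hlist buckets, they are zero-initialized in .bss at compile time, so every bucket already starts out as an empty hlist and the runtime INIT_HLIST_HEAD() loop in lockdep_init() is redundant. The array sizes below are placeholders; only the table names are taken from the diff.

/*
 * Sketch: a struct hlist_head is one pointer, so a static array of them
 * is all-NULL (i.e. every bucket is an empty list) before any code runs.
 */
struct hlist_node { struct hlist_node *next, **pprev; };
struct hlist_head { struct hlist_node *first; };

#define CLASSHASH_SIZE 4096	/* illustrative; the real size is config-derived */
#define CHAINHASH_SIZE 16384	/* illustrative; the real size is config-derived */

static struct hlist_head classhash_table[CLASSHASH_SIZE];	/* all .first == NULL */
static struct hlist_head chainhash_table[CHAINHASH_SIZE];	/* all .first == NULL */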
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 716547fdb873..3261214323fa 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -123,8 +123,6 @@ static inline int debug_locks_off_graph_unlock(void)
 	return ret;
 }
 
-static int lockdep_initialized;
-
 unsigned long nr_list_entries;
 static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
 
@@ -434,19 +432,6 @@ unsigned int max_lockdep_depth;
 
 #ifdef CONFIG_DEBUG_LOCKDEP
 /*
- * We cannot printk in early bootup code. Not even early_printk()
- * might work. So we mark any initialization errors and printk
- * about it later on, in lockdep_info().
- */
-static int lockdep_init_error;
-static const char *lock_init_error;
-static unsigned long lockdep_init_trace_data[20];
-static struct stack_trace lockdep_init_trace = {
-	.max_entries = ARRAY_SIZE(lockdep_init_trace_data),
-	.entries = lockdep_init_trace_data,
-};
-
-/*
  * Various lockdep statistics:
  */
 DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
@@ -669,20 +654,6 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 	struct hlist_head *hash_head;
 	struct lock_class *class;
 
-#ifdef CONFIG_DEBUG_LOCKDEP
-	/*
-	 * If the architecture calls into lockdep before initializing
-	 * the hashes then we'll warn about it later. (we cannot printk
-	 * right now)
-	 */
-	if (unlikely(!lockdep_initialized)) {
-		lockdep_init();
-		lockdep_init_error = 1;
-		lock_init_error = lock->name;
-		save_stack_trace(&lockdep_init_trace);
-	}
-#endif
-
 	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
 		debug_locks_off();
 		printk(KERN_ERR
@@ -4013,28 +3984,6 @@ out_restore:
 	raw_local_irq_restore(flags);
 }
 
-void lockdep_init(void)
-{
-	int i;
-
-	/*
-	 * Some architectures have their own start_kernel()
-	 * code which calls lockdep_init(), while we also
-	 * call lockdep_init() from the start_kernel() itself,
-	 * and we want to initialize the hashes only once:
-	 */
-	if (lockdep_initialized)
-		return;
-
-	for (i = 0; i < CLASSHASH_SIZE; i++)
-		INIT_HLIST_HEAD(classhash_table + i);
-
-	for (i = 0; i < CHAINHASH_SIZE; i++)
-		INIT_HLIST_HEAD(chainhash_table + i);
-
-	lockdep_initialized = 1;
-}
-
 void __init lockdep_info(void)
 {
 	printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
@@ -4061,14 +4010,6 @@ void __init lockdep_info(void)
 
 	printk(" per task-struct memory footprint: %lu bytes\n",
 		sizeof(struct held_lock) * MAX_LOCK_DEPTH);
-
-#ifdef CONFIG_DEBUG_LOCKDEP
-	if (lockdep_init_error) {
-		printk("WARNING: lockdep init error: lock '%s' was acquired before lockdep_init().\n", lock_init_error);
-		printk("Call stack leading to lockdep invocation was:\n");
-		print_stack_trace(&lockdep_init_trace, 0);
-	}
-#endif
 }
 
 static void