author      Thomas Gleixner <tglx@linutronix.de>    2010-02-17 12:27:37 -0500
committer   Thomas Gleixner <tglx@linutronix.de>    2010-02-17 12:28:05 -0500
commit      b7e56edba4b02f2079042c326a8cd72a44635817 (patch)
tree        b5042002e9747cd8fb1278d61f86d8b92a74c018 /lib/kernel_lock.c
parent      13ca0fcaa33f6b1984c4111b6ec5df42689fea6f (diff)
parent      b0483e78e5c4c9871fc5541875b3bc006846d46b (diff)
Merge branch 'linus' into x86/mm
x86/mm is on 32-rc4 and missing the spinlock namespace changes which
are needed for further commits into this topic.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'lib/kernel_lock.c')
-rw-r--r--   lib/kernel_lock.c   46
1 files changed, 28 insertions, 18 deletions
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 39f1029e3525..b135d04aa48a 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -5,10 +5,13 @@
  * relegated to obsolescence, but used by various less
  * important (or lazy) subsystems.
  */
-#include <linux/smp_lock.h>
 #include <linux/module.h>
 #include <linux/kallsyms.h>
 #include <linux/semaphore.h>
+#include <linux/smp_lock.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/bkl.h>
 
 /*
  * The 'big kernel lock'
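The hunk above wires the new BKL tracepoints into this file. As a rough sketch of the CREATE_TRACE_POINTS idiom only (the real event definitions live in include/trace/events/bkl.h and are not part of this diff): exactly one translation unit defines CREATE_TRACE_POINTS before including the event header so the tracepoint bodies are emitted there, while every other user includes the header plainly and just calls the generated trace_*() helpers.

/* In exactly one .c file (here lib/kernel_lock.c): emit the tracepoint bodies. */
#define CREATE_TRACE_POINTS
#include <trace/events/bkl.h>

/* Any other file that only fires the events includes the header plainly: */
#include <trace/events/bkl.h>

static void some_bkl_user(void)		/* hypothetical caller, for illustration */
{
	/* trace_lock_kernel() is generated from the TRACE_EVENT() in bkl.h */
	trace_lock_kernel(__func__, __FILE__, __LINE__);
}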
@@ -20,7 +23,7 @@
  *
  * Don't use in new code.
  */
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
+static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(kernel_flag);
 
 
 /*
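This is the hunk that needs the spinlock namespace changes mentioned in the merge message: kernel_flag becomes a raw_spinlock_t. A minimal sketch of the renamed API, assuming a kernel after the spinlock split (example_lock and example() are illustrative names, not part of this patch):

static DEFINE_RAW_SPINLOCK(example_lock);	/* declares a raw_spinlock_t */

static void example(void)
{
	/* Lockdep- and preempt-aware entry points for raw spinlocks. */
	raw_spin_lock(&example_lock);
	raw_spin_unlock(&example_lock);

	/*
	 * do_raw_spin_*() skips the lockdep and preempt_count() bookkeeping,
	 * which is why kernel_lock.c uses it and adjusts preempt_count()
	 * by hand.
	 */
	if (do_raw_spin_trylock(&example_lock))
		do_raw_spin_unlock(&example_lock);
}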
@@ -33,12 +36,12 @@ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
  * If it successfully gets the lock, it should increment
  * the preemption count like any spinlock does.
  *
- * (This works on UP too - _raw_spin_trylock will never
+ * (This works on UP too - do_raw_spin_trylock will never
  * return false in that case)
  */
 int __lockfunc __reacquire_kernel_lock(void)
 {
-	while (!_raw_spin_trylock(&kernel_flag)) {
+	while (!do_raw_spin_trylock(&kernel_flag)) {
 		if (need_resched())
 			return -EAGAIN;
 		cpu_relax();
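The comment spells out the contract: the trylock loop bails out with -EAGAIN when a reschedule is pending instead of spinning through it. A hedged sketch of a caller honoring that contract (in the real tree the caller is the scheduler, which restarts its own loop; reacquire_example() is hypothetical):

static int reacquire_example(void)
{
	int ret = __reacquire_kernel_lock();

	if (ret == -EAGAIN) {
		/* The lock is contended and need_resched() is set: let the
		 * caller reschedule first and try again later. */
		return ret;
	}
	/* ret == 0: the BKL is held again and preempt_count() was raised. */
	return 0;
}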
@@ -49,27 +52,27 @@ int __lockfunc __reacquire_kernel_lock(void)
 
 void __lockfunc __release_kernel_lock(void)
 {
-	_raw_spin_unlock(&kernel_flag);
+	do_raw_spin_unlock(&kernel_flag);
 	preempt_enable_no_resched();
 }
 
 /*
  * These are the BKL spinlocks - we try to be polite about preemption.
  * If SMP is not on (ie UP preemption), this all goes away because the
- * _raw_spin_trylock() will always succeed.
+ * do_raw_spin_trylock() will always succeed.
  */
 #ifdef CONFIG_PREEMPT
 static inline void __lock_kernel(void)
 {
 	preempt_disable();
-	if (unlikely(!_raw_spin_trylock(&kernel_flag))) {
+	if (unlikely(!do_raw_spin_trylock(&kernel_flag))) {
 		/*
 		 * If preemption was disabled even before this
 		 * was called, there's nothing we can be polite
 		 * about - just spin.
 		 */
 		if (preempt_count() > 1) {
-			_raw_spin_lock(&kernel_flag);
+			do_raw_spin_lock(&kernel_flag);
 			return;
 		}
 
@@ -79,10 +82,10 @@ static inline void __lock_kernel(void)
 		 */
 		do {
 			preempt_enable();
-			while (spin_is_locked(&kernel_flag))
+			while (raw_spin_is_locked(&kernel_flag))
 				cpu_relax();
 			preempt_disable();
-		} while (!_raw_spin_trylock(&kernel_flag));
+		} while (!do_raw_spin_trylock(&kernel_flag));
 	}
 }
 
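Restating the preemptible-wait pattern from __lock_kernel() as a standalone sketch (example_lock and polite_lock() are illustrative names, not kernel API): while the lock is busy, preemption is re-enabled so other tasks can run, and the trylock is only attempted with preemption off again.

static DEFINE_RAW_SPINLOCK(example_lock);

static void polite_lock(void)
{
	preempt_disable();
	while (!do_raw_spin_trylock(&example_lock)) {
		preempt_enable();			/* stay preemptible while waiting */
		while (raw_spin_is_locked(&example_lock))
			cpu_relax();
		preempt_disable();			/* retry only with preemption off */
	}
}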
@@ -93,7 +96,7 @@ static inline void __lock_kernel(void)
  */
 static inline void __lock_kernel(void)
 {
-	_raw_spin_lock(&kernel_flag);
+	do_raw_spin_lock(&kernel_flag);
 }
 #endif
 
@@ -103,7 +106,7 @@ static inline void __unlock_kernel(void)
 	 * the BKL is not covered by lockdep, so we open-code the
 	 * unlocking sequence (and thus avoid the dep-chain ops):
 	 */
-	_raw_spin_unlock(&kernel_flag);
+	do_raw_spin_unlock(&kernel_flag);
 	preempt_enable();
 }
 
@@ -113,21 +116,28 @@ static inline void __unlock_kernel(void)
  * This cannot happen asynchronously, so we only need to
  * worry about other CPU's.
  */
-void __lockfunc lock_kernel(void)
+void __lockfunc _lock_kernel(const char *func, const char *file, int line)
 {
-	int depth = current->lock_depth+1;
-	if (likely(!depth))
+	int depth = current->lock_depth + 1;
+
+	trace_lock_kernel(func, file, line);
+
+	if (likely(!depth)) {
+		might_sleep();
 		__lock_kernel();
+	}
 	current->lock_depth = depth;
 }
 
-void __lockfunc unlock_kernel(void)
+void __lockfunc _unlock_kernel(const char *func, const char *file, int line)
 {
 	BUG_ON(current->lock_depth < 0);
 	if (likely(--current->lock_depth < 0))
 		__unlock_kernel();
+
+	trace_unlock_kernel(func, file, line);
 }
 
-EXPORT_SYMBOL(lock_kernel);
-EXPORT_SYMBOL(unlock_kernel);
+EXPORT_SYMBOL(_lock_kernel);
+EXPORT_SYMBOL(_unlock_kernel);
 
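Callers keep using lock_kernel()/unlock_kernel(); with this series those are expected to be macros in <linux/smp_lock.h> that forward __func__, __FILE__ and __LINE__ to _lock_kernel()/_unlock_kernel(), so the new tracepoints record the call site. A sketch of the per-task recursion that lock_depth provides (outer() and inner() are hypothetical):

static void inner(void)
{
	lock_kernel();		/* lock_depth 0 -> 1: BKL already held, no spinning */
	/* ... */
	unlock_kernel();	/* 1 -> 0: still held on behalf of outer() */
}

static void outer(void)
{
	lock_kernel();		/* lock_depth -1 -> 0: actually takes kernel_flag */
	inner();
	unlock_kernel();	/* 0 -> -1: really drops the BKL */
}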