Diffstat (limited to 'lib/kernel_lock.c')
-rw-r--r--  lib/kernel_lock.c | 136
1 file changed, 0 insertions, 136 deletions
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
deleted file mode 100644
index d80e12265862..000000000000
--- a/lib/kernel_lock.c
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * lib/kernel_lock.c
- *
- * This is the traditional BKL - big kernel lock. Largely
- * relegated to obsolescence, but used by various less
- * important (or lazy) subsystems.
- */
-#include <linux/module.h>
-#include <linux/kallsyms.h>
-#include <linux/semaphore.h>
-#include <linux/smp_lock.h>
-
-/*
- * The 'big kernel lock'
- *
- * This spinlock is taken and released recursively by lock_kernel()
- * and unlock_kernel(). It is transparently dropped and reacquired
- * over schedule(). It is used to protect legacy code that hasn't
- * been migrated to a proper locking design yet.
- *
- * Don't use in new code.
- */
-static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(kernel_flag);
-
-
-/*
- * Acquire/release the underlying lock from the scheduler.
- *
- * This is called with preemption disabled, and should
- * return an error value if it cannot get the lock and
- * TIF_NEED_RESCHED gets set.
- *
- * If it successfully gets the lock, it should increment
- * the preemption count like any spinlock does.
- *
- * (This works on UP too - do_raw_spin_trylock will never
- * return false in that case)
- */
-int __lockfunc __reacquire_kernel_lock(void)
-{
-        while (!do_raw_spin_trylock(&kernel_flag)) {
-                if (need_resched())
-                        return -EAGAIN;
-                cpu_relax();
-        }
-        preempt_disable();
-        return 0;
-}
-
-void __lockfunc __release_kernel_lock(void)
-{
-        do_raw_spin_unlock(&kernel_flag);
-        preempt_enable_no_resched();
-}
-
-/*
- * These are the BKL spinlocks - we try to be polite about preemption.
- * If SMP is not on (ie UP preemption), this all goes away because the
- * do_raw_spin_trylock() will always succeed.
- */
-#ifdef CONFIG_PREEMPT
-static inline void __lock_kernel(void)
-{
-        preempt_disable();
-        if (unlikely(!do_raw_spin_trylock(&kernel_flag))) {
-                /*
-                 * If preemption was disabled even before this
-                 * was called, there's nothing we can be polite
-                 * about - just spin.
-                 */
-                if (preempt_count() > 1) {
-                        do_raw_spin_lock(&kernel_flag);
-                        return;
-                }
-
-                /*
-                 * Otherwise, let's wait for the kernel lock
-                 * with preemption enabled..
-                 */
-                do {
-                        preempt_enable();
-                        while (raw_spin_is_locked(&kernel_flag))
-                                cpu_relax();
-                        preempt_disable();
-                } while (!do_raw_spin_trylock(&kernel_flag));
-        }
-}
-
-#else
-
-/*
- * Non-preemption case - just get the spinlock
- */
-static inline void __lock_kernel(void)
-{
-        do_raw_spin_lock(&kernel_flag);
-}
-#endif
-
-static inline void __unlock_kernel(void)
-{
-        /*
-         * the BKL is not covered by lockdep, so we open-code the
-         * unlocking sequence (and thus avoid the dep-chain ops):
-         */
-        do_raw_spin_unlock(&kernel_flag);
-        preempt_enable();
-}
-
-/*
- * Getting the big kernel lock.
- *
- * This cannot happen asynchronously, so we only need to
- * worry about other CPU's.
- */
-void __lockfunc _lock_kernel(const char *func, const char *file, int line)
-{
-        int depth = current->lock_depth + 1;
-
-        if (likely(!depth)) {
-                might_sleep();
-                __lock_kernel();
-        }
-        current->lock_depth = depth;
-}
-
-void __lockfunc _unlock_kernel(const char *func, const char *file, int line)
-{
-        BUG_ON(current->lock_depth < 0);
-        if (likely(--current->lock_depth < 0))
-                __unlock_kernel();
-}
-
-EXPORT_SYMBOL(_lock_kernel);
-EXPORT_SYMBOL(_unlock_kernel);
-
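For reference, a minimal user-space sketch of the depth-counting scheme that _lock_kernel()/_unlock_kernel() implement above: a per-task counter makes nested acquisitions cheap, and only the outermost lock/unlock pair touches the underlying lock. The bkl_lock()/bkl_unlock() names and the pthread mutex standing in for kernel_flag are inventions of this sketch, not kernel interfaces; it only models the recursion logic visible in the deleted file.

/* Illustrative user-space model of the BKL depth counting shown above.
 * A pthread mutex stands in for kernel_flag; the thread-local lock_depth
 * plays the role of current->lock_depth. Not kernel code. */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t kernel_flag = PTHREAD_MUTEX_INITIALIZER;
static __thread int lock_depth = -1;    /* -1 means "not held", as in task_struct */

static void bkl_lock(void)              /* models _lock_kernel() */
{
        int depth = lock_depth + 1;

        if (depth == 0)                 /* only the outermost call takes the real lock */
                pthread_mutex_lock(&kernel_flag);
        lock_depth = depth;
}

static void bkl_unlock(void)            /* models _unlock_kernel() */
{
        assert(lock_depth >= 0);        /* stand-in for BUG_ON() */
        if (--lock_depth < 0)           /* only the outermost release drops the lock */
                pthread_mutex_unlock(&kernel_flag);
}

int main(void)
{
        bkl_lock();
        bkl_lock();                     /* nested call: depth 0 -> 1, no second lock, no deadlock */
        printf("depth while nested: %d\n", lock_depth);
        bkl_unlock();
        bkl_unlock();                   /* depth back to -1, mutex released */
        printf("depth after release: %d\n", lock_depth);
        return 0;
}

Built with gcc -pthread, the nested bkl_lock() call returns immediately because it only bumps the depth counter, which mirrors why reentrant lock_kernel() calls did not deadlock on the BKL spinlock.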