path: root/lib/kernel_lock.c
Diffstat (limited to 'lib/kernel_lock.c')
-rw-r--r--  lib/kernel_lock.c  120
1 file changed, 81 insertions(+), 39 deletions(-)
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index cd3e82530b03..01a3c22c1b5a 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -11,79 +11,121 @@
 #include <linux/semaphore.h>
 
 /*
- * The 'big kernel semaphore'
+ * The 'big kernel lock'
  *
- * This mutex is taken and released recursively by lock_kernel()
+ * This spinlock is taken and released recursively by lock_kernel()
  * and unlock_kernel(). It is transparently dropped and reacquired
  * over schedule(). It is used to protect legacy code that hasn't
  * been migrated to a proper locking design yet.
  *
- * Note: code locked by this semaphore will only be serialized against
- * other code using the same locking facility. The code guarantees that
- * the task remains on the same CPU.
- *
  * Don't use in new code.
  */
-static DECLARE_MUTEX(kernel_sem);
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
+
 
 /*
- * Re-acquire the kernel semaphore.
+ * Acquire/release the underlying lock from the scheduler.
  *
- * This function is called with preemption off.
+ * This is called with preemption disabled, and should
+ * return an error value if it cannot get the lock and
+ * TIF_NEED_RESCHED gets set.
  *
- * We are executing in schedule() so the code must be extremely careful
- * about recursion, both due to the down() and due to the enabling of
- * preemption. schedule() will re-check the preemption flag after
- * reacquiring the semaphore.
+ * If it successfully gets the lock, it should increment
+ * the preemption count like any spinlock does.
+ *
+ * (This works on UP too - _raw_spin_trylock will never
+ * return false in that case)
  */
 int __lockfunc __reacquire_kernel_lock(void)
 {
-	struct task_struct *task = current;
-	int saved_lock_depth = task->lock_depth;
-
-	BUG_ON(saved_lock_depth < 0);
-
-	task->lock_depth = -1;
-	preempt_enable_no_resched();
-
-	down(&kernel_sem);
-
+	while (!_raw_spin_trylock(&kernel_flag)) {
+		if (test_thread_flag(TIF_NEED_RESCHED))
+			return -EAGAIN;
+		cpu_relax();
+	}
 	preempt_disable();
-	task->lock_depth = saved_lock_depth;
-
 	return 0;
 }
 
 void __lockfunc __release_kernel_lock(void)
 {
-	up(&kernel_sem);
+	_raw_spin_unlock(&kernel_flag);
+	preempt_enable_no_resched();
 }
 
 /*
- * Getting the big kernel semaphore.
+ * These are the BKL spinlocks - we try to be polite about preemption.
+ * If SMP is not on (ie UP preemption), this all goes away because the
+ * _raw_spin_trylock() will always succeed.
  */
-void __lockfunc lock_kernel(void)
+#ifdef CONFIG_PREEMPT
+static inline void __lock_kernel(void)
 {
-	struct task_struct *task = current;
-	int depth = task->lock_depth + 1;
+	preempt_disable();
+	if (unlikely(!_raw_spin_trylock(&kernel_flag))) {
+		/*
+		 * If preemption was disabled even before this
+		 * was called, there's nothing we can be polite
+		 * about - just spin.
+		 */
+		if (preempt_count() > 1) {
+			_raw_spin_lock(&kernel_flag);
+			return;
+		}
 
-	if (likely(!depth))
 		/*
-		 * No recursion worries - we set up lock_depth _after_
+		 * Otherwise, let's wait for the kernel lock
+		 * with preemption enabled..
 		 */
-		down(&kernel_sem);
+		do {
+			preempt_enable();
+			while (spin_is_locked(&kernel_flag))
+				cpu_relax();
+			preempt_disable();
+		} while (!_raw_spin_trylock(&kernel_flag));
+	}
+}
 
-	task->lock_depth = depth;
+#else
+
+/*
+ * Non-preemption case - just get the spinlock
+ */
+static inline void __lock_kernel(void)
+{
+	_raw_spin_lock(&kernel_flag);
 }
+#endif
 
-void __lockfunc unlock_kernel(void)
+static inline void __unlock_kernel(void)
 {
-	struct task_struct *task = current;
+	/*
+	 * the BKL is not covered by lockdep, so we open-code the
+	 * unlocking sequence (and thus avoid the dep-chain ops):
+	 */
+	_raw_spin_unlock(&kernel_flag);
+	preempt_enable();
+}
 
-	BUG_ON(task->lock_depth < 0);
+/*
+ * Getting the big kernel lock.
+ *
+ * This cannot happen asynchronously, so we only need to
+ * worry about other CPU's.
+ */
+void __lockfunc lock_kernel(void)
+{
+	int depth = current->lock_depth+1;
+	if (likely(!depth))
+		__lock_kernel();
+	current->lock_depth = depth;
+}
 
-	if (likely(--task->lock_depth < 0))
-		up(&kernel_sem);
+void __lockfunc unlock_kernel(void)
+{
+	BUG_ON(current->lock_depth < 0);
+	if (likely(--current->lock_depth < 0))
+		__unlock_kernel();
 }
 
 EXPORT_SYMBOL(lock_kernel);
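
For readers unfamiliar with the BKL's recursion semantics, below is a minimal userspace sketch (not part of the patch) of the lock_depth bookkeeping that lock_kernel()/unlock_kernel() perform above: only the outermost acquisition takes the underlying lock, nested calls just adjust a per-task depth counter that starts at -1. A pthread mutex stands in for kernel_flag and a thread-local counter stands in for current->lock_depth; the names my_lock_kernel, my_unlock_kernel, my_lock_depth and bkl_flag are invented for this illustration.

/*
 * Illustrative userspace sketch, NOT kernel code: models the recursive
 * lock_depth bookkeeping of lock_kernel()/unlock_kernel().  Build with
 * "cc -pthread sketch.c".
 */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t bkl_flag = PTHREAD_MUTEX_INITIALIZER;
static __thread int my_lock_depth = -1;	/* stand-in for current->lock_depth */

static void my_lock_kernel(void)
{
	int depth = my_lock_depth + 1;

	/* Only the outermost acquisition takes the real lock ... */
	if (depth == 0)
		pthread_mutex_lock(&bkl_flag);
	/* ... nested calls merely record the extra depth. */
	my_lock_depth = depth;
}

static void my_unlock_kernel(void)
{
	/* Mirrors the BUG_ON(): unlocking while not held is a bug. */
	assert(my_lock_depth >= 0);

	/* Only the outermost release drops the real lock. */
	if (--my_lock_depth < 0)
		pthread_mutex_unlock(&bkl_flag);
}

int main(void)
{
	my_lock_kernel();	/* outer acquisition: takes bkl_flag */
	my_lock_kernel();	/* nested acquisition: depth only    */
	my_unlock_kernel();	/* nested release: depth only        */
	my_unlock_kernel();	/* outer release: drops bkl_flag     */
	printf("final depth: %d\n", my_lock_depth);	/* prints -1 */
	return 0;
}

The assert() plays the role of the BUG_ON() in unlock_kernel(): a release with the depth already at -1 means there was no matching lock_kernel(). The same counter is what lets the scheduler drop and reacquire the lock transparently across schedule() via __release_kernel_lock()/__reacquire_kernel_lock().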