Diffstat (limited to 'lib/kernel_lock.c')
 lib/kernel_lock.c | 26 ++++++++++++++------------
 1 file changed, 14 insertions(+), 12 deletions(-)
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 4ebfa5a164d..b135d04aa48 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -23,7 +23,7 @@
  *
  * Don't use in new code.
  */
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
+static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(kernel_flag);
 
 
 /*
@@ -36,12 +36,12 @@ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
  * If it successfully gets the lock, it should increment
  * the preemption count like any spinlock does.
  *
- * (This works on UP too - _raw_spin_trylock will never
+ * (This works on UP too - do_raw_spin_trylock will never
  * return false in that case)
  */
 int __lockfunc __reacquire_kernel_lock(void)
 {
-	while (!_raw_spin_trylock(&kernel_flag)) {
+	while (!do_raw_spin_trylock(&kernel_flag)) {
 		if (need_resched())
 			return -EAGAIN;
 		cpu_relax();
@@ -52,27 +52,27 @@ int __lockfunc __reacquire_kernel_lock(void)
 
 void __lockfunc __release_kernel_lock(void)
 {
-	_raw_spin_unlock(&kernel_flag);
+	do_raw_spin_unlock(&kernel_flag);
 	preempt_enable_no_resched();
 }
 
 /*
  * These are the BKL spinlocks - we try to be polite about preemption.
  * If SMP is not on (ie UP preemption), this all goes away because the
- * _raw_spin_trylock() will always succeed.
+ * do_raw_spin_trylock() will always succeed.
  */
 #ifdef CONFIG_PREEMPT
 static inline void __lock_kernel(void)
 {
 	preempt_disable();
-	if (unlikely(!_raw_spin_trylock(&kernel_flag))) {
+	if (unlikely(!do_raw_spin_trylock(&kernel_flag))) {
 		/*
 		 * If preemption was disabled even before this
 		 * was called, there's nothing we can be polite
 		 * about - just spin.
 		 */
 		if (preempt_count() > 1) {
-			_raw_spin_lock(&kernel_flag);
+			do_raw_spin_lock(&kernel_flag);
 			return;
 		}
 
@@ -82,10 +82,10 @@ static inline void __lock_kernel(void)
 		 */
 		do {
 			preempt_enable();
-			while (spin_is_locked(&kernel_flag))
+			while (raw_spin_is_locked(&kernel_flag))
 				cpu_relax();
 			preempt_disable();
-		} while (!_raw_spin_trylock(&kernel_flag));
+		} while (!do_raw_spin_trylock(&kernel_flag));
 	}
 }
 
@@ -96,7 +96,7 @@ static inline void __lock_kernel(void)
  */
 static inline void __lock_kernel(void)
 {
-	_raw_spin_lock(&kernel_flag);
+	do_raw_spin_lock(&kernel_flag);
 }
 #endif
 
@@ -106,7 +106,7 @@ static inline void __unlock_kernel(void)
 	 * the BKL is not covered by lockdep, so we open-code the
 	 * unlocking sequence (and thus avoid the dep-chain ops):
 	 */
-	_raw_spin_unlock(&kernel_flag);
+	do_raw_spin_unlock(&kernel_flag);
 	preempt_enable();
 }
 
@@ -122,8 +122,10 @@ void __lockfunc _lock_kernel(const char *func, const char *file, int line)
 
 	trace_lock_kernel(func, file, line);
 
-	if (likely(!depth))
+	if (likely(!depth)) {
+		might_sleep();
 		__lock_kernel();
+	}
 	current->lock_depth = depth;
 }
