Diffstat (limited to 'include')
60 files changed, 1341 insertions, 1774 deletions
diff --git a/include/asm-alpha/spinlock.h b/include/asm-alpha/spinlock.h
index 80780dba9986..8197c69eff44 100644
--- a/include/asm-alpha/spinlock.h
+++ b/include/asm-alpha/spinlock.h
| @@ -6,7 +6,6 @@ | |||
| 6 | #include <linux/kernel.h> | 6 | #include <linux/kernel.h> |
| 7 | #include <asm/current.h> | 7 | #include <asm/current.h> |
| 8 | 8 | ||
| 9 | |||
| 10 | /* | 9 | /* |
| 11 | * Simple spin lock operations. There are two variants, one clears IRQ's | 10 | * Simple spin lock operations. There are two variants, one clears IRQ's |
| 12 | * on the local processor, one does not. | 11 | * on the local processor, one does not. |
| @@ -14,43 +13,18 @@ | |||
| 14 | * We make no fairness assumptions. They have a cost. | 13 | * We make no fairness assumptions. They have a cost. |
| 15 | */ | 14 | */ |
| 16 | 15 | ||
| 17 | typedef struct { | 16 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
| 18 | volatile unsigned int lock; | 17 | #define __raw_spin_is_locked(x) ((x)->lock != 0) |
| 19 | #ifdef CONFIG_DEBUG_SPINLOCK | 18 | #define __raw_spin_unlock_wait(x) \ |
| 20 | int on_cpu; | 19 | do { cpu_relax(); } while ((x)->lock) |
| 21 | int line_no; | 20 | |
| 22 | void *previous; | 21 | static inline void __raw_spin_unlock(raw_spinlock_t * lock) |
| 23 | struct task_struct * task; | ||
| 24 | const char *base_file; | ||
| 25 | #endif | ||
| 26 | } spinlock_t; | ||
| 27 | |||
| 28 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 29 | #define SPIN_LOCK_UNLOCKED (spinlock_t){ 0, -1, 0, NULL, NULL, NULL } | ||
| 30 | #else | ||
| 31 | #define SPIN_LOCK_UNLOCKED (spinlock_t){ 0 } | ||
| 32 | #endif | ||
| 33 | |||
| 34 | #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0) | ||
| 35 | #define spin_is_locked(x) ((x)->lock != 0) | ||
| 36 | #define spin_unlock_wait(x) do { barrier(); } while ((x)->lock) | ||
| 37 | |||
| 38 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 39 | extern void _raw_spin_unlock(spinlock_t * lock); | ||
| 40 | extern void debug_spin_lock(spinlock_t * lock, const char *, int); | ||
| 41 | extern int debug_spin_trylock(spinlock_t * lock, const char *, int); | ||
| 42 | #define _raw_spin_lock(LOCK) \ | ||
| 43 | debug_spin_lock(LOCK, __BASE_FILE__, __LINE__) | ||
| 44 | #define _raw_spin_trylock(LOCK) \ | ||
| 45 | debug_spin_trylock(LOCK, __BASE_FILE__, __LINE__) | ||
| 46 | #else | ||
| 47 | static inline void _raw_spin_unlock(spinlock_t * lock) | ||
| 48 | { | 22 | { |
| 49 | mb(); | 23 | mb(); |
| 50 | lock->lock = 0; | 24 | lock->lock = 0; |
| 51 | } | 25 | } |
| 52 | 26 | ||
| 53 | static inline void _raw_spin_lock(spinlock_t * lock) | 27 | static inline void __raw_spin_lock(raw_spinlock_t * lock) |
| 54 | { | 28 | { |
| 55 | long tmp; | 29 | long tmp; |
| 56 | 30 | ||
| @@ -70,80 +44,64 @@ static inline void _raw_spin_lock(spinlock_t * lock) | |||
| 70 | : "m"(lock->lock) : "memory"); | 44 | : "m"(lock->lock) : "memory"); |
| 71 | } | 45 | } |
| 72 | 46 | ||
| 73 | static inline int _raw_spin_trylock(spinlock_t *lock) | 47 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) |
| 74 | { | 48 | { |
| 75 | return !test_and_set_bit(0, &lock->lock); | 49 | return !test_and_set_bit(0, &lock->lock); |
| 76 | } | 50 | } |
| 77 | #endif /* CONFIG_DEBUG_SPINLOCK */ | ||
| 78 | |||
| 79 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | ||
| 80 | 51 | ||
| 81 | /***********************************************************/ | 52 | /***********************************************************/ |
| 82 | 53 | ||
| 83 | typedef struct { | 54 | static inline int __raw_read_can_lock(raw_rwlock_t *lock) |
| 84 | volatile unsigned int lock; | ||
| 85 | } rwlock_t; | ||
| 86 | |||
| 87 | #define RW_LOCK_UNLOCKED (rwlock_t){ 0 } | ||
| 88 | |||
| 89 | #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0) | ||
| 90 | |||
| 91 | static inline int read_can_lock(rwlock_t *lock) | ||
| 92 | { | 55 | { |
| 93 | return (lock->lock & 1) == 0; | 56 | return (lock->lock & 1) == 0; |
| 94 | } | 57 | } |
| 95 | 58 | ||
| 96 | static inline int write_can_lock(rwlock_t *lock) | 59 | static inline int __raw_write_can_lock(raw_rwlock_t *lock) |
| 97 | { | 60 | { |
| 98 | return lock->lock == 0; | 61 | return lock->lock == 0; |
| 99 | } | 62 | } |
| 100 | 63 | ||
| 101 | #ifdef CONFIG_DEBUG_RWLOCK | 64 | static inline void __raw_read_lock(raw_rwlock_t *lock) |
| 102 | extern void _raw_write_lock(rwlock_t * lock); | ||
| 103 | extern void _raw_read_lock(rwlock_t * lock); | ||
| 104 | #else | ||
| 105 | static inline void _raw_write_lock(rwlock_t * lock) | ||
| 106 | { | 65 | { |
| 107 | long regx; | 66 | long regx; |
| 108 | 67 | ||
| 109 | __asm__ __volatile__( | 68 | __asm__ __volatile__( |
| 110 | "1: ldl_l %1,%0\n" | 69 | "1: ldl_l %1,%0\n" |
| 111 | " bne %1,6f\n" | 70 | " blbs %1,6f\n" |
| 112 | " lda %1,1\n" | 71 | " subl %1,2,%1\n" |
| 113 | " stl_c %1,%0\n" | 72 | " stl_c %1,%0\n" |
| 114 | " beq %1,6f\n" | 73 | " beq %1,6f\n" |
| 115 | " mb\n" | 74 | " mb\n" |
| 116 | ".subsection 2\n" | 75 | ".subsection 2\n" |
| 117 | "6: ldl %1,%0\n" | 76 | "6: ldl %1,%0\n" |
| 118 | " bne %1,6b\n" | 77 | " blbs %1,6b\n" |
| 119 | " br 1b\n" | 78 | " br 1b\n" |
| 120 | ".previous" | 79 | ".previous" |
| 121 | : "=m" (*lock), "=&r" (regx) | 80 | : "=m" (*lock), "=&r" (regx) |
| 122 | : "m" (*lock) : "memory"); | 81 | : "m" (*lock) : "memory"); |
| 123 | } | 82 | } |
| 124 | 83 | ||
| 125 | static inline void _raw_read_lock(rwlock_t * lock) | 84 | static inline void __raw_write_lock(raw_rwlock_t *lock) |
| 126 | { | 85 | { |
| 127 | long regx; | 86 | long regx; |
| 128 | 87 | ||
| 129 | __asm__ __volatile__( | 88 | __asm__ __volatile__( |
| 130 | "1: ldl_l %1,%0\n" | 89 | "1: ldl_l %1,%0\n" |
| 131 | " blbs %1,6f\n" | 90 | " bne %1,6f\n" |
| 132 | " subl %1,2,%1\n" | 91 | " lda %1,1\n" |
| 133 | " stl_c %1,%0\n" | 92 | " stl_c %1,%0\n" |
| 134 | " beq %1,6f\n" | 93 | " beq %1,6f\n" |
| 135 | " mb\n" | 94 | " mb\n" |
| 136 | ".subsection 2\n" | 95 | ".subsection 2\n" |
| 137 | "6: ldl %1,%0\n" | 96 | "6: ldl %1,%0\n" |
| 138 | " blbs %1,6b\n" | 97 | " bne %1,6b\n" |
| 139 | " br 1b\n" | 98 | " br 1b\n" |
| 140 | ".previous" | 99 | ".previous" |
| 141 | : "=m" (*lock), "=&r" (regx) | 100 | : "=m" (*lock), "=&r" (regx) |
| 142 | : "m" (*lock) : "memory"); | 101 | : "m" (*lock) : "memory"); |
| 143 | } | 102 | } |
| 144 | #endif /* CONFIG_DEBUG_RWLOCK */ | ||
| 145 | 103 | ||
| 146 | static inline int _raw_read_trylock(rwlock_t * lock) | 104 | static inline int __raw_read_trylock(raw_rwlock_t * lock) |
| 147 | { | 105 | { |
| 148 | long regx; | 106 | long regx; |
| 149 | int success; | 107 | int success; |
| @@ -165,7 +123,7 @@ static inline int _raw_read_trylock(rwlock_t * lock) | |||
| 165 | return success; | 123 | return success; |
| 166 | } | 124 | } |
| 167 | 125 | ||
| 168 | static inline int _raw_write_trylock(rwlock_t * lock) | 126 | static inline int __raw_write_trylock(raw_rwlock_t * lock) |
| 169 | { | 127 | { |
| 170 | long regx; | 128 | long regx; |
| 171 | int success; | 129 | int success; |
| @@ -187,13 +145,7 @@ static inline int _raw_write_trylock(rwlock_t * lock) | |||
| 187 | return success; | 145 | return success; |
| 188 | } | 146 | } |
| 189 | 147 | ||
| 190 | static inline void _raw_write_unlock(rwlock_t * lock) | 148 | static inline void __raw_read_unlock(raw_rwlock_t * lock) |
| 191 | { | ||
| 192 | mb(); | ||
| 193 | lock->lock = 0; | ||
| 194 | } | ||
| 195 | |||
| 196 | static inline void _raw_read_unlock(rwlock_t * lock) | ||
| 197 | { | 149 | { |
| 198 | long regx; | 150 | long regx; |
| 199 | __asm__ __volatile__( | 151 | __asm__ __volatile__( |
| @@ -209,4 +161,10 @@ static inline void _raw_read_unlock(rwlock_t * lock) | |||
| 209 | : "m" (*lock) : "memory"); | 161 | : "m" (*lock) : "memory"); |
| 210 | } | 162 | } |
| 211 | 163 | ||
| 164 | static inline void __raw_write_unlock(raw_rwlock_t * lock) | ||
| 165 | { | ||
| 166 | mb(); | ||
| 167 | lock->lock = 0; | ||
| 168 | } | ||
| 169 | |||
| 212 | #endif /* _ALPHA_SPINLOCK_H */ | 170 | #endif /* _ALPHA_SPINLOCK_H */ |
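With the arch header reduced to the __raw_* mechanism, policy such as preemption control and CONFIG_DEBUG_SPINLOCK checking moves into generic code. A minimal sketch of that layering, assuming spinlock_t embeds the raw type (sketched after the next file) and leaving out the irq-save and bh variants the real wrappers in kernel/spinlock.c also handle:

/* Sketch only: simplified generic wrappers over the arch __raw_ ops. */
void example_spin_lock(spinlock_t *lock)
{
	preempt_disable();			/* generic policy */
	__raw_spin_lock(&lock->raw_lock);	/* arch mechanism above */
}

void example_spin_unlock(spinlock_t *lock)
{
	__raw_spin_unlock(&lock->raw_lock);
	preempt_enable();
}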
diff --git a/include/asm-alpha/spinlock_types.h b/include/asm-alpha/spinlock_types.h
new file mode 100644
index 000000000000..8141eb5ebf0d
--- /dev/null
+++ b/include/asm-alpha/spinlock_types.h
| @@ -0,0 +1,20 @@ | |||
| 1 | #ifndef _ALPHA_SPINLOCK_TYPES_H | ||
| 2 | #define _ALPHA_SPINLOCK_TYPES_H | ||
| 3 | |||
| 4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
| 5 | # error "please don't include this file directly" | ||
| 6 | #endif | ||
| 7 | |||
| 8 | typedef struct { | ||
| 9 | volatile unsigned int lock; | ||
| 10 | } raw_spinlock_t; | ||
| 11 | |||
| 12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | ||
| 13 | |||
| 14 | typedef struct { | ||
| 15 | volatile unsigned int lock; | ||
| 16 | } raw_rwlock_t; | ||
| 17 | |||
| 18 | #define __RAW_RW_LOCK_UNLOCKED { 0 } | ||
| 19 | |||
| 20 | #endif | ||
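The "please don't include this file directly" guard exists because these raw types are only meant to reach C code through linux/spinlock_types.h, which wraps them. A sketch of the assumed shape of that wrapper in this series (field set abbreviated):

/* Sketch of include/linux/spinlock_types.h (abbreviated): the arch raw
 * type is embedded in the generic spinlock_t, and the debug fields that
 * used to live in each arch header move here. */
typedef struct {
	raw_spinlock_t raw_lock;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
	unsigned int break_lock;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	unsigned int magic, owner_cpu;
	void *owner;
#endif
} spinlock_t;

#define SPIN_LOCK_UNLOCKED \
	(spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED }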
diff --git a/include/asm-arm/spinlock.h b/include/asm-arm/spinlock.h
index 1f906d09b688..cb4906b45555 100644
--- a/include/asm-arm/spinlock.h
+++ b/include/asm-arm/spinlock.h
| @@ -16,21 +16,14 @@ | |||
| 16 | * Unlocked value: 0 | 16 | * Unlocked value: 0 |
| 17 | * Locked value: 1 | 17 | * Locked value: 1 |
| 18 | */ | 18 | */ |
| 19 | typedef struct { | ||
| 20 | volatile unsigned int lock; | ||
| 21 | #ifdef CONFIG_PREEMPT | ||
| 22 | unsigned int break_lock; | ||
| 23 | #endif | ||
| 24 | } spinlock_t; | ||
| 25 | 19 | ||
| 26 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } | 20 | #define __raw_spin_is_locked(x) ((x)->lock != 0) |
| 21 | #define __raw_spin_unlock_wait(lock) \ | ||
| 22 | do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) | ||
| 27 | 23 | ||
| 28 | #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while (0) | 24 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
| 29 | #define spin_is_locked(x) ((x)->lock != 0) | ||
| 30 | #define spin_unlock_wait(x) do { barrier(); } while (spin_is_locked(x)) | ||
| 31 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | ||
| 32 | 25 | ||
| 33 | static inline void _raw_spin_lock(spinlock_t *lock) | 26 | static inline void __raw_spin_lock(raw_spinlock_t *lock) |
| 34 | { | 27 | { |
| 35 | unsigned long tmp; | 28 | unsigned long tmp; |
| 36 | 29 | ||
| @@ -47,7 +40,7 @@ static inline void _raw_spin_lock(spinlock_t *lock) | |||
| 47 | smp_mb(); | 40 | smp_mb(); |
| 48 | } | 41 | } |
| 49 | 42 | ||
| 50 | static inline int _raw_spin_trylock(spinlock_t *lock) | 43 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) |
| 51 | { | 44 | { |
| 52 | unsigned long tmp; | 45 | unsigned long tmp; |
| 53 | 46 | ||
| @@ -67,7 +60,7 @@ static inline int _raw_spin_trylock(spinlock_t *lock) | |||
| 67 | } | 60 | } |
| 68 | } | 61 | } |
| 69 | 62 | ||
| 70 | static inline void _raw_spin_unlock(spinlock_t *lock) | 63 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
| 71 | { | 64 | { |
| 72 | smp_mb(); | 65 | smp_mb(); |
| 73 | 66 | ||
| @@ -80,23 +73,14 @@ static inline void _raw_spin_unlock(spinlock_t *lock) | |||
| 80 | 73 | ||
| 81 | /* | 74 | /* |
| 82 | * RWLOCKS | 75 | * RWLOCKS |
| 83 | */ | 76 | * |
| 84 | typedef struct { | 77 | * |
| 85 | volatile unsigned int lock; | ||
| 86 | #ifdef CONFIG_PREEMPT | ||
| 87 | unsigned int break_lock; | ||
| 88 | #endif | ||
| 89 | } rwlock_t; | ||
| 90 | |||
| 91 | #define RW_LOCK_UNLOCKED (rwlock_t) { 0 } | ||
| 92 | #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while (0) | ||
| 93 | #define rwlock_is_locked(x) (*((volatile unsigned int *)(x)) != 0) | ||
| 94 | |||
| 95 | /* | ||
| 96 | * Write locks are easy - we just set bit 31. When unlocking, we can | 78 | * Write locks are easy - we just set bit 31. When unlocking, we can |
| 97 | * just write zero since the lock is exclusively held. | 79 | * just write zero since the lock is exclusively held. |
| 98 | */ | 80 | */ |
| 99 | static inline void _raw_write_lock(rwlock_t *rw) | 81 | #define rwlock_is_locked(x) (*((volatile unsigned int *)(x)) != 0) |
| 82 | |||
| 83 | static inline void __raw_write_lock(rwlock_t *rw) | ||
| 100 | { | 84 | { |
| 101 | unsigned long tmp; | 85 | unsigned long tmp; |
| 102 | 86 | ||
| @@ -113,7 +97,7 @@ static inline void _raw_write_lock(rwlock_t *rw) | |||
| 113 | smp_mb(); | 97 | smp_mb(); |
| 114 | } | 98 | } |
| 115 | 99 | ||
| 116 | static inline int _raw_write_trylock(rwlock_t *rw) | 100 | static inline int __raw_write_trylock(rwlock_t *rw) |
| 117 | { | 101 | { |
| 118 | unsigned long tmp; | 102 | unsigned long tmp; |
| 119 | 103 | ||
| @@ -133,7 +117,7 @@ static inline int _raw_write_trylock(rwlock_t *rw) | |||
| 133 | } | 117 | } |
| 134 | } | 118 | } |
| 135 | 119 | ||
| 136 | static inline void _raw_write_unlock(rwlock_t *rw) | 120 | static inline void __raw_write_unlock(raw_rwlock_t *rw) |
| 137 | { | 121 | { |
| 138 | smp_mb(); | 122 | smp_mb(); |
| 139 | 123 | ||
| @@ -156,7 +140,7 @@ static inline void _raw_write_unlock(rwlock_t *rw) | |||
| 156 | * currently active. However, we know we won't have any write | 140 | * currently active. However, we know we won't have any write |
| 157 | * locks. | 141 | * locks. |
| 158 | */ | 142 | */ |
| 159 | static inline void _raw_read_lock(rwlock_t *rw) | 143 | static inline void __raw_read_lock(raw_rwlock_t *rw) |
| 160 | { | 144 | { |
| 161 | unsigned long tmp, tmp2; | 145 | unsigned long tmp, tmp2; |
| 162 | 146 | ||
| @@ -173,7 +157,7 @@ static inline void _raw_read_lock(rwlock_t *rw) | |||
| 173 | smp_mb(); | 157 | smp_mb(); |
| 174 | } | 158 | } |
| 175 | 159 | ||
| 176 | static inline void _raw_read_unlock(rwlock_t *rw) | 160 | static inline void __raw_read_unlock(rwlock_t *rw) |
| 177 | { | 161 | { |
| 178 | unsigned long tmp, tmp2; | 162 | unsigned long tmp, tmp2; |
| 179 | 163 | ||
| @@ -190,6 +174,6 @@ static inline void _raw_read_unlock(rwlock_t *rw) | |||
| 190 | : "cc"); | 174 | : "cc"); |
| 191 | } | 175 | } |
| 192 | 176 | ||
| 193 | #define _raw_read_trylock(lock) generic_raw_read_trylock(lock) | 177 | #define __raw_read_trylock(lock) generic__raw_read_trylock(lock) |
| 194 | 178 | ||
| 195 | #endif /* __ASM_SPINLOCK_H */ | 179 | #endif /* __ASM_SPINLOCK_H */ |
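ARM defers read_trylock to the generic helper. For reference, generic__raw_read_trylock() in <linux/spinlock.h> of this era amounts to the following: read locks here cannot fail against other readers, so "try" degenerates into a plain (possibly briefly spinning) read lock that always reports success.

static inline int generic__raw_read_trylock(raw_rwlock_t *lock)
{
	__raw_read_lock(lock);	/* may spin while a writer holds the lock */
	return 1;		/* then always succeeds */
}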
diff --git a/include/asm-arm/spinlock_types.h b/include/asm-arm/spinlock_types.h
new file mode 100644
index 000000000000..43e83f6d2ee5
--- /dev/null
+++ b/include/asm-arm/spinlock_types.h
| @@ -0,0 +1,20 @@ | |||
| 1 | #ifndef __ASM_SPINLOCK_TYPES_H | ||
| 2 | #define __ASM_SPINLOCK_TYPES_H | ||
| 3 | |||
| 4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
| 5 | # error "please don't include this file directly" | ||
| 6 | #endif | ||
| 7 | |||
| 8 | typedef struct { | ||
| 9 | volatile unsigned int lock; | ||
| 10 | } raw_spinlock_t; | ||
| 11 | |||
| 12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | ||
| 13 | |||
| 14 | typedef struct { | ||
| 15 | volatile unsigned int lock; | ||
| 16 | } raw_rwlock_t; | ||
| 17 | |||
| 18 | #define __RAW_RW_LOCK_UNLOCKED { 0 } | ||
| 19 | |||
| 20 | #endif | ||
diff --git a/include/asm-arm/unistd.h b/include/asm-arm/unistd.h
index 278de61224d1..c49df635a80f 100644
--- a/include/asm-arm/unistd.h
+++ b/include/asm-arm/unistd.h
| @@ -355,6 +355,9 @@ | |||
| 355 | #define __NR_inotify_init (__NR_SYSCALL_BASE+316) | 355 | #define __NR_inotify_init (__NR_SYSCALL_BASE+316) |
| 356 | #define __NR_inotify_add_watch (__NR_SYSCALL_BASE+317) | 356 | #define __NR_inotify_add_watch (__NR_SYSCALL_BASE+317) |
| 357 | #define __NR_inotify_rm_watch (__NR_SYSCALL_BASE+318) | 357 | #define __NR_inotify_rm_watch (__NR_SYSCALL_BASE+318) |
| 358 | #define __NR_mbind (__NR_SYSCALL_BASE+319) | ||
| 359 | #define __NR_get_mempolicy (__NR_SYSCALL_BASE+320) | ||
| 360 | #define __NR_set_mempolicy (__NR_SYSCALL_BASE+321) | ||
| 358 | 361 | ||
| 359 | /* | 362 | /* |
| 360 | * The following SWIs are ARM private. | 363 | * The following SWIs are ARM private. |
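The three new numbers wire up the NUMA memory-policy syscalls on ARM. A hypothetical userspace invocation through the generic syscall(2) wrapper (MPOL_DEFAULT is an assumed value from <linux/mempolicy.h>; the arguments are placeholders, not a working policy setup):

#include <unistd.h>
#include <sys/syscall.h>

#define MPOL_DEFAULT	0	/* assumed value from <linux/mempolicy.h> */

long clear_mempolicy_example(void)
{
	/* glibc passes the full __NR_ value straight to the kernel */
	return syscall(__NR_set_mempolicy, MPOL_DEFAULT, NULL, 0UL);
}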
diff --git a/include/asm-arm26/hardirq.h b/include/asm-arm26/hardirq.h
index 791ee1da9bfa..dc28daab8aa8 100644
--- a/include/asm-arm26/hardirq.h
+++ b/include/asm-arm26/hardirq.h
| @@ -22,8 +22,6 @@ typedef struct { | |||
| 22 | # error HARDIRQ_BITS is too low! | 22 | # error HARDIRQ_BITS is too low! |
| 23 | #endif | 23 | #endif |
| 24 | 24 | ||
| 25 | #define irq_enter() (preempt_count() += HARDIRQ_OFFSET) | ||
| 26 | |||
| 27 | #ifndef CONFIG_SMP | 25 | #ifndef CONFIG_SMP |
| 28 | 26 | ||
| 29 | extern asmlinkage void __do_softirq(void); | 27 | extern asmlinkage void __do_softirq(void); |
diff --git a/include/asm-i386/div64.h b/include/asm-i386/div64.h
index 28ed8b296afc..75c67c785bb8 100644
--- a/include/asm-i386/div64.h
+++ b/include/asm-i386/div64.h
| @@ -35,7 +35,7 @@ | |||
| 35 | */ | 35 | */ |
| 36 | #define div_long_long_rem(a,b,c) div_ll_X_l_rem(a,b,c) | 36 | #define div_long_long_rem(a,b,c) div_ll_X_l_rem(a,b,c) |
| 37 | 37 | ||
| 38 | extern inline long | 38 | static inline long |
| 39 | div_ll_X_l_rem(long long divs, long div, long *rem) | 39 | div_ll_X_l_rem(long long divs, long div, long *rem) |
| 40 | { | 40 | { |
| 41 | long dum2; | 41 | long dum2; |
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 37bef8ed7bed..0a4ec764377c 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
| @@ -679,7 +679,7 @@ static inline void rep_nop(void) | |||
| 679 | However we don't do prefetches for pre XP Athlons currently | 679 | However we don't do prefetches for pre XP Athlons currently |
| 680 | That should be fixed. */ | 680 | That should be fixed. */ |
| 681 | #define ARCH_HAS_PREFETCH | 681 | #define ARCH_HAS_PREFETCH |
| 682 | extern inline void prefetch(const void *x) | 682 | static inline void prefetch(const void *x) |
| 683 | { | 683 | { |
| 684 | alternative_input(ASM_NOP4, | 684 | alternative_input(ASM_NOP4, |
| 685 | "prefetchnta (%1)", | 685 | "prefetchnta (%1)", |
| @@ -693,7 +693,7 @@ extern inline void prefetch(const void *x) | |||
| 693 | 693 | ||
| 694 | /* 3dnow! prefetch to get an exclusive cache line. Useful for | 694 | /* 3dnow! prefetch to get an exclusive cache line. Useful for |
| 695 | spinlocks to avoid one state transition in the cache coherency protocol. */ | 695 | spinlocks to avoid one state transition in the cache coherency protocol. */ |
| 696 | extern inline void prefetchw(const void *x) | 696 | static inline void prefetchw(const void *x) |
| 697 | { | 697 | { |
| 698 | alternative_input(ASM_NOP4, | 698 | alternative_input(ASM_NOP4, |
| 699 | "prefetchw (%1)", | 699 | "prefetchw (%1)", |
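Both hunks above replace "extern inline" with "static inline". Under gcc's traditional GNU89 semantics, "extern inline" never emits an out-of-line body, so any call the compiler declines to inline becomes an unresolved symbol at link time; "static inline" lets each translation unit materialize its own copy on demand. A minimal illustration with hypothetical functions:

/* With gcc -O0 neither call is inlined: the extern-inline version
 * leaves no body in this unit ("undefined reference to twice_old"),
 * while the static-inline version emits a local copy and links. */
extern inline int twice_old(int x) { return 2 * x; }
static inline int twice_new(int x) { return 2 * x; }

int use_them(int v)
{
	return twice_new(v);	/* twice_old(v) would break -O0 links */
}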
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index f9ff31f40036..23604350cdf4 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
| @@ -7,46 +7,21 @@ | |||
| 7 | #include <linux/config.h> | 7 | #include <linux/config.h> |
| 8 | #include <linux/compiler.h> | 8 | #include <linux/compiler.h> |
| 9 | 9 | ||
| 10 | asmlinkage int printk(const char * fmt, ...) | ||
| 11 | __attribute__ ((format (printf, 1, 2))); | ||
| 12 | |||
| 13 | /* | 10 | /* |
| 14 | * Your basic SMP spinlocks, allowing only a single CPU anywhere | 11 | * Your basic SMP spinlocks, allowing only a single CPU anywhere |
| 15 | */ | 12 | * |
| 16 | |||
| 17 | typedef struct { | ||
| 18 | volatile unsigned int slock; | ||
| 19 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 20 | unsigned magic; | ||
| 21 | #endif | ||
| 22 | #ifdef CONFIG_PREEMPT | ||
| 23 | unsigned int break_lock; | ||
| 24 | #endif | ||
| 25 | } spinlock_t; | ||
| 26 | |||
| 27 | #define SPINLOCK_MAGIC 0xdead4ead | ||
| 28 | |||
| 29 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 30 | #define SPINLOCK_MAGIC_INIT , SPINLOCK_MAGIC | ||
| 31 | #else | ||
| 32 | #define SPINLOCK_MAGIC_INIT /* */ | ||
| 33 | #endif | ||
| 34 | |||
| 35 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT } | ||
| 36 | |||
| 37 | #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0) | ||
| 38 | |||
| 39 | /* | ||
| 40 | * Simple spin lock operations. There are two variants, one clears IRQ's | 13 | * Simple spin lock operations. There are two variants, one clears IRQ's |
| 41 | * on the local processor, one does not. | 14 | * on the local processor, one does not. |
| 42 | * | 15 | * |
| 43 | * We make no fairness assumptions. They have a cost. | 16 | * We make no fairness assumptions. They have a cost. |
| 17 | * | ||
| 18 | * (the type definitions are in asm/spinlock_types.h) | ||
| 44 | */ | 19 | */ |
| 45 | 20 | ||
| 46 | #define spin_is_locked(x) (*(volatile signed char *)(&(x)->slock) <= 0) | 21 | #define __raw_spin_is_locked(x) \ |
| 47 | #define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x)) | 22 | (*(volatile signed char *)(&(x)->slock) <= 0) |
| 48 | 23 | ||
| 49 | #define spin_lock_string \ | 24 | #define __raw_spin_lock_string \ |
| 50 | "\n1:\t" \ | 25 | "\n1:\t" \ |
| 51 | "lock ; decb %0\n\t" \ | 26 | "lock ; decb %0\n\t" \ |
| 52 | "jns 3f\n" \ | 27 | "jns 3f\n" \ |
| @@ -57,7 +32,7 @@ typedef struct { | |||
| 57 | "jmp 1b\n" \ | 32 | "jmp 1b\n" \ |
| 58 | "3:\n\t" | 33 | "3:\n\t" |
| 59 | 34 | ||
| 60 | #define spin_lock_string_flags \ | 35 | #define __raw_spin_lock_string_flags \ |
| 61 | "\n1:\t" \ | 36 | "\n1:\t" \ |
| 62 | "lock ; decb %0\n\t" \ | 37 | "lock ; decb %0\n\t" \ |
| 63 | "jns 4f\n\t" \ | 38 | "jns 4f\n\t" \ |
| @@ -73,86 +48,71 @@ typedef struct { | |||
| 73 | "jmp 1b\n" \ | 48 | "jmp 1b\n" \ |
| 74 | "4:\n\t" | 49 | "4:\n\t" |
| 75 | 50 | ||
| 51 | static inline void __raw_spin_lock(raw_spinlock_t *lock) | ||
| 52 | { | ||
| 53 | __asm__ __volatile__( | ||
| 54 | __raw_spin_lock_string | ||
| 55 | :"=m" (lock->slock) : : "memory"); | ||
| 56 | } | ||
| 57 | |||
| 58 | static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) | ||
| 59 | { | ||
| 60 | __asm__ __volatile__( | ||
| 61 | __raw_spin_lock_string_flags | ||
| 62 | :"=m" (lock->slock) : "r" (flags) : "memory"); | ||
| 63 | } | ||
| 64 | |||
| 65 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) | ||
| 66 | { | ||
| 67 | char oldval; | ||
| 68 | __asm__ __volatile__( | ||
| 69 | "xchgb %b0,%1" | ||
| 70 | :"=q" (oldval), "=m" (lock->slock) | ||
| 71 | :"0" (0) : "memory"); | ||
| 72 | return oldval > 0; | ||
| 73 | } | ||
| 74 | |||
| 76 | /* | 75 | /* |
| 77 | * This works. Despite all the confusion. | 76 | * __raw_spin_unlock based on writing $1 to the low byte. |
| 78 | * (except on PPro SMP or if we are using OOSTORE) | 77 | * This method works. Despite all the confusion. |
| 78 | * (except on PPro SMP or if we are using OOSTORE, so we use xchgb there) | ||
| 79 | * (PPro errata 66, 92) | 79 | * (PPro errata 66, 92) |
| 80 | */ | 80 | */ |
| 81 | 81 | ||
| 82 | #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE) | 82 | #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE) |
| 83 | 83 | ||
| 84 | #define spin_unlock_string \ | 84 | #define __raw_spin_unlock_string \ |
| 85 | "movb $1,%0" \ | 85 | "movb $1,%0" \ |
| 86 | :"=m" (lock->slock) : : "memory" | 86 | :"=m" (lock->slock) : : "memory" |
| 87 | 87 | ||
| 88 | 88 | ||
| 89 | static inline void _raw_spin_unlock(spinlock_t *lock) | 89 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
| 90 | { | 90 | { |
| 91 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 92 | BUG_ON(lock->magic != SPINLOCK_MAGIC); | ||
| 93 | BUG_ON(!spin_is_locked(lock)); | ||
| 94 | #endif | ||
| 95 | __asm__ __volatile__( | 91 | __asm__ __volatile__( |
| 96 | spin_unlock_string | 92 | __raw_spin_unlock_string |
| 97 | ); | 93 | ); |
| 98 | } | 94 | } |
| 99 | 95 | ||
| 100 | #else | 96 | #else |
| 101 | 97 | ||
| 102 | #define spin_unlock_string \ | 98 | #define __raw_spin_unlock_string \ |
| 103 | "xchgb %b0, %1" \ | 99 | "xchgb %b0, %1" \ |
| 104 | :"=q" (oldval), "=m" (lock->slock) \ | 100 | :"=q" (oldval), "=m" (lock->slock) \ |
| 105 | :"0" (oldval) : "memory" | 101 | :"0" (oldval) : "memory" |
| 106 | 102 | ||
| 107 | static inline void _raw_spin_unlock(spinlock_t *lock) | 103 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
| 108 | { | 104 | { |
| 109 | char oldval = 1; | 105 | char oldval = 1; |
| 110 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 111 | BUG_ON(lock->magic != SPINLOCK_MAGIC); | ||
| 112 | BUG_ON(!spin_is_locked(lock)); | ||
| 113 | #endif | ||
| 114 | __asm__ __volatile__( | ||
| 115 | spin_unlock_string | ||
| 116 | ); | ||
| 117 | } | ||
| 118 | 106 | ||
| 119 | #endif | ||
| 120 | |||
| 121 | static inline int _raw_spin_trylock(spinlock_t *lock) | ||
| 122 | { | ||
| 123 | char oldval; | ||
| 124 | __asm__ __volatile__( | 107 | __asm__ __volatile__( |
| 125 | "xchgb %b0,%1" | 108 | __raw_spin_unlock_string |
| 126 | :"=q" (oldval), "=m" (lock->slock) | 109 | ); |
| 127 | :"0" (0) : "memory"); | ||
| 128 | return oldval > 0; | ||
| 129 | } | 110 | } |
| 130 | 111 | ||
| 131 | static inline void _raw_spin_lock(spinlock_t *lock) | ||
| 132 | { | ||
| 133 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 134 | if (unlikely(lock->magic != SPINLOCK_MAGIC)) { | ||
| 135 | printk("eip: %p\n", __builtin_return_address(0)); | ||
| 136 | BUG(); | ||
| 137 | } | ||
| 138 | #endif | 112 | #endif |
| 139 | __asm__ __volatile__( | ||
| 140 | spin_lock_string | ||
| 141 | :"=m" (lock->slock) : : "memory"); | ||
| 142 | } | ||
| 143 | 113 | ||
| 144 | static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags) | 114 | #define __raw_spin_unlock_wait(lock) \ |
| 145 | { | 115 | do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) |
| 146 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 147 | if (unlikely(lock->magic != SPINLOCK_MAGIC)) { | ||
| 148 | printk("eip: %p\n", __builtin_return_address(0)); | ||
| 149 | BUG(); | ||
| 150 | } | ||
| 151 | #endif | ||
| 152 | __asm__ __volatile__( | ||
| 153 | spin_lock_string_flags | ||
| 154 | :"=m" (lock->slock) : "r" (flags) : "memory"); | ||
| 155 | } | ||
| 156 | 116 | ||
| 157 | /* | 117 | /* |
| 158 | * Read-write spinlocks, allowing multiple readers | 118 | * Read-write spinlocks, allowing multiple readers |
| @@ -163,72 +123,41 @@ static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags) | |||
| 163 | * can "mix" irq-safe locks - any writer needs to get a | 123 | * can "mix" irq-safe locks - any writer needs to get a |
| 164 | * irq-safe write-lock, but readers can get non-irqsafe | 124 | * irq-safe write-lock, but readers can get non-irqsafe |
| 165 | * read-locks. | 125 | * read-locks. |
| 126 | * | ||
| 127 | * On x86, we implement read-write locks as a 32-bit counter | ||
| 128 | * with the high bit (sign) being the "contended" bit. | ||
| 129 | * | ||
| 130 | * The inline assembly is non-obvious. Think about it. | ||
| 131 | * | ||
| 132 | * Changed to use the same technique as rw semaphores. See | ||
| 133 | * semaphore.h for details. -ben | ||
| 134 | * | ||
| 135 | * the helpers are in arch/i386/kernel/semaphore.c | ||
| 166 | */ | 136 | */ |
| 167 | typedef struct { | ||
| 168 | volatile unsigned int lock; | ||
| 169 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 170 | unsigned magic; | ||
| 171 | #endif | ||
| 172 | #ifdef CONFIG_PREEMPT | ||
| 173 | unsigned int break_lock; | ||
| 174 | #endif | ||
| 175 | } rwlock_t; | ||
| 176 | |||
| 177 | #define RWLOCK_MAGIC 0xdeaf1eed | ||
| 178 | |||
| 179 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 180 | #define RWLOCK_MAGIC_INIT , RWLOCK_MAGIC | ||
| 181 | #else | ||
| 182 | #define RWLOCK_MAGIC_INIT /* */ | ||
| 183 | #endif | ||
| 184 | |||
| 185 | #define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT } | ||
| 186 | |||
| 187 | #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0) | ||
| 188 | 137 | ||
| 189 | /** | 138 | /** |
| 190 | * read_can_lock - would read_trylock() succeed? | 139 | * read_can_lock - would read_trylock() succeed? |
| 191 | * @lock: the rwlock in question. | 140 | * @lock: the rwlock in question. |
| 192 | */ | 141 | */ |
| 193 | #define read_can_lock(x) ((int)(x)->lock > 0) | 142 | #define __raw_read_can_lock(x) ((int)(x)->lock > 0) |
| 194 | 143 | ||
| 195 | /** | 144 | /** |
| 196 | * write_can_lock - would write_trylock() succeed? | 145 | * write_can_lock - would write_trylock() succeed? |
| 197 | * @lock: the rwlock in question. | 146 | * @lock: the rwlock in question. |
| 198 | */ | 147 | */ |
| 199 | #define write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) | 148 | #define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) |
| 200 | 149 | ||
| 201 | /* | 150 | static inline void __raw_read_lock(raw_rwlock_t *rw) |
| 202 | * On x86, we implement read-write locks as a 32-bit counter | ||
| 203 | * with the high bit (sign) being the "contended" bit. | ||
| 204 | * | ||
| 205 | * The inline assembly is non-obvious. Think about it. | ||
| 206 | * | ||
| 207 | * Changed to use the same technique as rw semaphores. See | ||
| 208 | * semaphore.h for details. -ben | ||
| 209 | */ | ||
| 210 | /* the spinlock helpers are in arch/i386/kernel/semaphore.c */ | ||
| 211 | |||
| 212 | static inline void _raw_read_lock(rwlock_t *rw) | ||
| 213 | { | 151 | { |
| 214 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 215 | BUG_ON(rw->magic != RWLOCK_MAGIC); | ||
| 216 | #endif | ||
| 217 | __build_read_lock(rw, "__read_lock_failed"); | 152 | __build_read_lock(rw, "__read_lock_failed"); |
| 218 | } | 153 | } |
| 219 | 154 | ||
| 220 | static inline void _raw_write_lock(rwlock_t *rw) | 155 | static inline void __raw_write_lock(raw_rwlock_t *rw) |
| 221 | { | 156 | { |
| 222 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 223 | BUG_ON(rw->magic != RWLOCK_MAGIC); | ||
| 224 | #endif | ||
| 225 | __build_write_lock(rw, "__write_lock_failed"); | 157 | __build_write_lock(rw, "__write_lock_failed"); |
| 226 | } | 158 | } |
| 227 | 159 | ||
| 228 | #define _raw_read_unlock(rw) asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory") | 160 | static inline int __raw_read_trylock(raw_rwlock_t *lock) |
| 229 | #define _raw_write_unlock(rw) asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory") | ||
| 230 | |||
| 231 | static inline int _raw_read_trylock(rwlock_t *lock) | ||
| 232 | { | 161 | { |
| 233 | atomic_t *count = (atomic_t *)lock; | 162 | atomic_t *count = (atomic_t *)lock; |
| 234 | atomic_dec(count); | 163 | atomic_dec(count); |
| @@ -238,7 +167,7 @@ static inline int _raw_read_trylock(rwlock_t *lock) | |||
| 238 | return 0; | 167 | return 0; |
| 239 | } | 168 | } |
| 240 | 169 | ||
| 241 | static inline int _raw_write_trylock(rwlock_t *lock) | 170 | static inline int __raw_write_trylock(raw_rwlock_t *lock) |
| 242 | { | 171 | { |
| 243 | atomic_t *count = (atomic_t *)lock; | 172 | atomic_t *count = (atomic_t *)lock; |
| 244 | if (atomic_sub_and_test(RW_LOCK_BIAS, count)) | 173 | if (atomic_sub_and_test(RW_LOCK_BIAS, count)) |
| @@ -247,4 +176,15 @@ static inline int _raw_write_trylock(rwlock_t *lock) | |||
| 247 | return 0; | 176 | return 0; |
| 248 | } | 177 | } |
| 249 | 178 | ||
| 179 | static inline void __raw_read_unlock(raw_rwlock_t *rw) | ||
| 180 | { | ||
| 181 | asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory"); | ||
| 182 | } | ||
| 183 | |||
| 184 | static inline void __raw_write_unlock(raw_rwlock_t *rw) | ||
| 185 | { | ||
| 186 | asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ", %0" | ||
| 187 | : "=m" (rw->lock) : : "memory"); | ||
| 188 | } | ||
| 189 | |||
| 250 | #endif /* __ASM_SPINLOCK_H */ | 190 | #endif /* __ASM_SPINLOCK_H */ |
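The restored comment block describes the biased-counter design. A non-atomic model of the arithmetic, for illustration only (the real paths use lock-prefixed instructions and the out-of-line helpers in arch/i386/kernel/semaphore.c):

/* Model of the RW_LOCK_BIAS scheme: readers move the 32-bit count by
 * 1, a writer by the whole bias, and the sign bit doubles as the
 * "contended" flag. Not atomic -- arithmetic illustration only. */
#define MODEL_RW_LOCK_BIAS 0x01000000

int model_read_trylock(int *count)
{
	*count -= 1;			/* atomic_dec() in the real code */
	if (*count >= 0)
		return 1;		/* no writer present */
	*count += 1;			/* back out, as __raw_read_trylock does */
	return 0;
}

int model_write_trylock(int *count)
{
	*count -= MODEL_RW_LOCK_BIAS;	/* atomic_sub_and_test() equivalent */
	if (*count == 0)
		return 1;		/* no readers, no writer: lock taken */
	*count += MODEL_RW_LOCK_BIAS;	/* contended: restore the bias */
	return 0;
}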
diff --git a/include/asm-i386/spinlock_types.h b/include/asm-i386/spinlock_types.h
new file mode 100644
index 000000000000..59efe849f351
--- /dev/null
+++ b/include/asm-i386/spinlock_types.h
| @@ -0,0 +1,20 @@ | |||
| 1 | #ifndef __ASM_SPINLOCK_TYPES_H | ||
| 2 | #define __ASM_SPINLOCK_TYPES_H | ||
| 3 | |||
| 4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
| 5 | # error "please don't include this file directly" | ||
| 6 | #endif | ||
| 7 | |||
| 8 | typedef struct { | ||
| 9 | volatile unsigned int slock; | ||
| 10 | } raw_spinlock_t; | ||
| 11 | |||
| 12 | #define __RAW_SPIN_LOCK_UNLOCKED { 1 } | ||
| 13 | |||
| 14 | typedef struct { | ||
| 15 | volatile unsigned int lock; | ||
| 16 | } raw_rwlock_t; | ||
| 17 | |||
| 18 | #define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } | ||
| 19 | |||
| 20 | #endif | ||
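Note the per-arch difference in the unlocked value: i386 initializes slock to 1, since its lock path is "lock; decb" and positive means free, whereas alpha and arm above start from 0. Hypothetical static initialization using these macros:

static raw_spinlock_t demo_lock = __RAW_SPIN_LOCK_UNLOCKED;	/* slock == 1 */
static raw_rwlock_t demo_rwlock = __RAW_RW_LOCK_UNLOCKED;	/* lock == RW_LOCK_BIAS */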
diff --git a/include/asm-ia64/spinlock.h b/include/asm-ia64/spinlock.h
index d2430aa0d49d..5b78611411c3 100644
--- a/include/asm-ia64/spinlock.h
+++ b/include/asm-ia64/spinlock.h
| @@ -17,28 +17,20 @@ | |||
| 17 | #include <asm/intrinsics.h> | 17 | #include <asm/intrinsics.h> |
| 18 | #include <asm/system.h> | 18 | #include <asm/system.h> |
| 19 | 19 | ||
| 20 | typedef struct { | 20 | #define __raw_spin_lock_init(x) ((x)->lock = 0) |
| 21 | volatile unsigned int lock; | ||
| 22 | #ifdef CONFIG_PREEMPT | ||
| 23 | unsigned int break_lock; | ||
| 24 | #endif | ||
| 25 | } spinlock_t; | ||
| 26 | |||
| 27 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } | ||
| 28 | #define spin_lock_init(x) ((x)->lock = 0) | ||
| 29 | 21 | ||
| 30 | #ifdef ASM_SUPPORTED | 22 | #ifdef ASM_SUPPORTED |
| 31 | /* | 23 | /* |
| 32 | * Try to get the lock. If we fail to get the lock, make a non-standard call to | 24 | * Try to get the lock. If we fail to get the lock, make a non-standard call to |
| 33 | * ia64_spinlock_contention(). We do not use a normal call because that would force all | 25 | * ia64_spinlock_contention(). We do not use a normal call because that would force all |
| 34 | * callers of spin_lock() to be non-leaf routines. Instead, ia64_spinlock_contention() is | 26 | * callers of __raw_spin_lock() to be non-leaf routines. Instead, ia64_spinlock_contention() is |
| 35 | * carefully coded to touch only those registers that spin_lock() marks "clobbered". | 27 | * carefully coded to touch only those registers that __raw_spin_lock() marks "clobbered". |
| 36 | */ | 28 | */ |
| 37 | 29 | ||
| 38 | #define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory" | 30 | #define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory" |
| 39 | 31 | ||
| 40 | static inline void | 32 | static inline void |
| 41 | _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags) | 33 | __raw_spin_lock_flags (raw_spinlock_t *lock, unsigned long flags) |
| 42 | { | 34 | { |
| 43 | register volatile unsigned int *ptr asm ("r31") = &lock->lock; | 35 | register volatile unsigned int *ptr asm ("r31") = &lock->lock; |
| 44 | 36 | ||
| @@ -94,17 +86,17 @@ _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags) | |||
| 94 | #endif | 86 | #endif |
| 95 | } | 87 | } |
| 96 | 88 | ||
| 97 | #define _raw_spin_lock(lock) _raw_spin_lock_flags(lock, 0) | 89 | #define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0) |
| 98 | 90 | ||
| 99 | /* Unlock by doing an ordered store and releasing the cacheline with nta */ | 91 | /* Unlock by doing an ordered store and releasing the cacheline with nta */ |
| 100 | static inline void _raw_spin_unlock(spinlock_t *x) { | 92 | static inline void __raw_spin_unlock(raw_spinlock_t *x) { |
| 101 | barrier(); | 93 | barrier(); |
| 102 | asm volatile ("st4.rel.nta [%0] = r0\n\t" :: "r"(x)); | 94 | asm volatile ("st4.rel.nta [%0] = r0\n\t" :: "r"(x)); |
| 103 | } | 95 | } |
| 104 | 96 | ||
| 105 | #else /* !ASM_SUPPORTED */ | 97 | #else /* !ASM_SUPPORTED */ |
| 106 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | 98 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
| 107 | # define _raw_spin_lock(x) \ | 99 | # define __raw_spin_lock(x) \ |
| 108 | do { \ | 100 | do { \ |
| 109 | __u32 *ia64_spinlock_ptr = (__u32 *) (x); \ | 101 | __u32 *ia64_spinlock_ptr = (__u32 *) (x); \ |
| 110 | __u64 ia64_spinlock_val; \ | 102 | __u64 ia64_spinlock_val; \ |
| @@ -117,29 +109,20 @@ do { \ | |||
| 117 | } while (ia64_spinlock_val); \ | 109 | } while (ia64_spinlock_val); \ |
| 118 | } \ | 110 | } \ |
| 119 | } while (0) | 111 | } while (0) |
| 120 | #define _raw_spin_unlock(x) do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0) | 112 | #define __raw_spin_unlock(x) do { barrier(); ((raw_spinlock_t *) x)->lock = 0; } while (0) |
| 121 | #endif /* !ASM_SUPPORTED */ | 113 | #endif /* !ASM_SUPPORTED */ |
| 122 | 114 | ||
| 123 | #define spin_is_locked(x) ((x)->lock != 0) | 115 | #define __raw_spin_is_locked(x) ((x)->lock != 0) |
| 124 | #define _raw_spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0) | 116 | #define __raw_spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0) |
| 125 | #define spin_unlock_wait(x) do { barrier(); } while ((x)->lock) | 117 | #define __raw_spin_unlock_wait(lock) \ |
| 126 | 118 | do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) | |
| 127 | typedef struct { | ||
| 128 | volatile unsigned int read_counter : 24; | ||
| 129 | volatile unsigned int write_lock : 8; | ||
| 130 | #ifdef CONFIG_PREEMPT | ||
| 131 | unsigned int break_lock; | ||
| 132 | #endif | ||
| 133 | } rwlock_t; | ||
| 134 | #define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 } | ||
| 135 | 119 | ||
| 136 | #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0) | 120 | #define __raw_read_can_lock(rw) (*(volatile int *)(rw) >= 0) |
| 137 | #define read_can_lock(rw) (*(volatile int *)(rw) >= 0) | 121 | #define __raw_write_can_lock(rw) (*(volatile int *)(rw) == 0) |
| 138 | #define write_can_lock(rw) (*(volatile int *)(rw) == 0) | ||
| 139 | 122 | ||
| 140 | #define _raw_read_lock(rw) \ | 123 | #define __raw_read_lock(rw) \ |
| 141 | do { \ | 124 | do { \ |
| 142 | rwlock_t *__read_lock_ptr = (rw); \ | 125 | raw_rwlock_t *__read_lock_ptr = (rw); \ |
| 143 | \ | 126 | \ |
| 144 | while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \ | 127 | while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \ |
| 145 | ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ | 128 | ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ |
| @@ -148,14 +131,14 @@ do { \ | |||
| 148 | } \ | 131 | } \ |
| 149 | } while (0) | 132 | } while (0) |
| 150 | 133 | ||
| 151 | #define _raw_read_unlock(rw) \ | 134 | #define __raw_read_unlock(rw) \ |
| 152 | do { \ | 135 | do { \ |
| 153 | rwlock_t *__read_lock_ptr = (rw); \ | 136 | raw_rwlock_t *__read_lock_ptr = (rw); \ |
| 154 | ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ | 137 | ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ |
| 155 | } while (0) | 138 | } while (0) |
| 156 | 139 | ||
| 157 | #ifdef ASM_SUPPORTED | 140 | #ifdef ASM_SUPPORTED |
| 158 | #define _raw_write_lock(rw) \ | 141 | #define __raw_write_lock(rw) \ |
| 159 | do { \ | 142 | do { \ |
| 160 | __asm__ __volatile__ ( \ | 143 | __asm__ __volatile__ ( \ |
| 161 | "mov ar.ccv = r0\n" \ | 144 | "mov ar.ccv = r0\n" \ |
| @@ -170,7 +153,7 @@ do { \ | |||
| 170 | :: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory"); \ | 153 | :: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory"); \ |
| 171 | } while(0) | 154 | } while(0) |
| 172 | 155 | ||
| 173 | #define _raw_write_trylock(rw) \ | 156 | #define __raw_write_trylock(rw) \ |
| 174 | ({ \ | 157 | ({ \ |
| 175 | register long result; \ | 158 | register long result; \ |
| 176 | \ | 159 | \ |
| @@ -182,7 +165,7 @@ do { \ | |||
| 182 | (result == 0); \ | 165 | (result == 0); \ |
| 183 | }) | 166 | }) |
| 184 | 167 | ||
| 185 | static inline void _raw_write_unlock(rwlock_t *x) | 168 | static inline void __raw_write_unlock(raw_rwlock_t *x) |
| 186 | { | 169 | { |
| 187 | u8 *y = (u8 *)x; | 170 | u8 *y = (u8 *)x; |
| 188 | barrier(); | 171 | barrier(); |
| @@ -191,7 +174,7 @@ static inline void _raw_write_unlock(rwlock_t *x) | |||
| 191 | 174 | ||
| 192 | #else /* !ASM_SUPPORTED */ | 175 | #else /* !ASM_SUPPORTED */ |
| 193 | 176 | ||
| 194 | #define _raw_write_lock(l) \ | 177 | #define __raw_write_lock(l) \ |
| 195 | ({ \ | 178 | ({ \ |
| 196 | __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \ | 179 | __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \ |
| 197 | __u32 *ia64_write_lock_ptr = (__u32 *) (l); \ | 180 | __u32 *ia64_write_lock_ptr = (__u32 *) (l); \ |
| @@ -202,7 +185,7 @@ static inline void _raw_write_unlock(rwlock_t *x) | |||
| 202 | } while (ia64_val); \ | 185 | } while (ia64_val); \ |
| 203 | }) | 186 | }) |
| 204 | 187 | ||
| 205 | #define _raw_write_trylock(rw) \ | 188 | #define __raw_write_trylock(rw) \ |
| 206 | ({ \ | 189 | ({ \ |
| 207 | __u64 ia64_val; \ | 190 | __u64 ia64_val; \ |
| 208 | __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1); \ | 191 | __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1); \ |
| @@ -210,7 +193,7 @@ static inline void _raw_write_unlock(rwlock_t *x) | |||
| 210 | (ia64_val == 0); \ | 193 | (ia64_val == 0); \ |
| 211 | }) | 194 | }) |
| 212 | 195 | ||
| 213 | static inline void _raw_write_unlock(rwlock_t *x) | 196 | static inline void __raw_write_unlock(raw_rwlock_t *x) |
| 214 | { | 197 | { |
| 215 | barrier(); | 198 | barrier(); |
| 216 | x->write_lock = 0; | 199 | x->write_lock = 0; |
| @@ -218,6 +201,6 @@ static inline void _raw_write_unlock(rwlock_t *x) | |||
| 218 | 201 | ||
| 219 | #endif /* !ASM_SUPPORTED */ | 202 | #endif /* !ASM_SUPPORTED */ |
| 220 | 203 | ||
| 221 | #define _raw_read_trylock(lock) generic_raw_read_trylock(lock) | 204 | #define __raw_read_trylock(lock) generic__raw_read_trylock(lock) |
| 222 | 205 | ||
| 223 | #endif /* _ASM_IA64_SPINLOCK_H */ | 206 | #endif /* _ASM_IA64_SPINLOCK_H */ |
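The ia64 trylock is a single acquire-ordered compare-and-swap: cmpxchg_acq(&lock->lock, 0, 1) returns the old value, and 0 means the lock was taken. A sketch of the same semantics using a GCC builtin as a stand-in for the ia64 intrinsic:

static inline int model_spin_trylock(volatile unsigned int *word)
{
	/* swap 0 -> 1 atomically; old value 0 means we now own the lock */
	return __sync_val_compare_and_swap(word, 0U, 1U) == 0;
}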
diff --git a/include/asm-ia64/spinlock_types.h b/include/asm-ia64/spinlock_types.h
new file mode 100644
index 000000000000..474e46f1ab4a
--- /dev/null
+++ b/include/asm-ia64/spinlock_types.h
| @@ -0,0 +1,21 @@ | |||
| 1 | #ifndef _ASM_IA64_SPINLOCK_TYPES_H | ||
| 2 | #define _ASM_IA64_SPINLOCK_TYPES_H | ||
| 3 | |||
| 4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
| 5 | # error "please don't include this file directly" | ||
| 6 | #endif | ||
| 7 | |||
| 8 | typedef struct { | ||
| 9 | volatile unsigned int lock; | ||
| 10 | } raw_spinlock_t; | ||
| 11 | |||
| 12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | ||
| 13 | |||
| 14 | typedef struct { | ||
| 15 | volatile unsigned int read_counter : 31; | ||
| 16 | volatile unsigned int write_lock : 1; | ||
| 17 | } raw_rwlock_t; | ||
| 18 | |||
| 19 | #define __RAW_RW_LOCK_UNLOCKED { 0, 0 } | ||
| 20 | |||
| 21 | #endif | ||
diff --git a/include/asm-m32r/spinlock.h b/include/asm-m32r/spinlock.h
index 6608d8371c50..7de7def28da9 100644
--- a/include/asm-m32r/spinlock.h
+++ b/include/asm-m32r/spinlock.h
| @@ -14,57 +14,30 @@ | |||
| 14 | #include <asm/atomic.h> | 14 | #include <asm/atomic.h> |
| 15 | #include <asm/page.h> | 15 | #include <asm/page.h> |
| 16 | 16 | ||
| 17 | extern int printk(const char * fmt, ...) | ||
| 18 | __attribute__ ((format (printf, 1, 2))); | ||
| 19 | |||
| 20 | #define RW_LOCK_BIAS 0x01000000 | ||
| 21 | #define RW_LOCK_BIAS_STR "0x01000000" | ||
| 22 | |||
| 23 | /* | 17 | /* |
| 24 | * Your basic SMP spinlocks, allowing only a single CPU anywhere | 18 | * Your basic SMP spinlocks, allowing only a single CPU anywhere |
| 25 | */ | 19 | * |
| 26 | 20 | * (the type definitions are in asm/spinlock_types.h) | |
| 27 | typedef struct { | 21 | * |
| 28 | volatile int slock; | ||
| 29 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 30 | unsigned magic; | ||
| 31 | #endif | ||
| 32 | #ifdef CONFIG_PREEMPT | ||
| 33 | unsigned int break_lock; | ||
| 34 | #endif | ||
| 35 | } spinlock_t; | ||
| 36 | |||
| 37 | #define SPINLOCK_MAGIC 0xdead4ead | ||
| 38 | |||
| 39 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 40 | #define SPINLOCK_MAGIC_INIT , SPINLOCK_MAGIC | ||
| 41 | #else | ||
| 42 | #define SPINLOCK_MAGIC_INIT /* */ | ||
| 43 | #endif | ||
| 44 | |||
| 45 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT } | ||
| 46 | |||
| 47 | #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0) | ||
| 48 | |||
| 49 | /* | ||
| 50 | * Simple spin lock operations. There are two variants, one clears IRQ's | 22 | * Simple spin lock operations. There are two variants, one clears IRQ's |
| 51 | * on the local processor, one does not. | 23 | * on the local processor, one does not. |
| 52 | * | 24 | * |
| 53 | * We make no fairness assumptions. They have a cost. | 25 | * We make no fairness assumptions. They have a cost. |
| 54 | */ | 26 | */ |
| 55 | 27 | ||
| 56 | #define spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0) | 28 | #define __raw_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0) |
| 57 | #define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x)) | 29 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
| 58 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | 30 | #define __raw_spin_unlock_wait(x) \ |
| 31 | do { cpu_relax(); } while (__raw_spin_is_locked(x)) | ||
| 59 | 32 | ||
| 60 | /** | 33 | /** |
| 61 | * _raw_spin_trylock - Try spin lock and return a result | 34 | * __raw_spin_trylock - Try spin lock and return a result |
| 62 | * @lock: Pointer to the lock variable | 35 | * @lock: Pointer to the lock variable |
| 63 | * | 36 | * |
| 64 | * _raw_spin_trylock() tries to get the lock and returns a result. | 37 | * __raw_spin_trylock() tries to get the lock and returns a result. |
| 65 | * On the m32r, the result value is 1 (= Success) or 0 (= Failure). | 38 | * On the m32r, the result value is 1 (= Success) or 0 (= Failure). |
| 66 | */ | 39 | */ |
| 67 | static inline int _raw_spin_trylock(spinlock_t *lock) | 40 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) |
| 68 | { | 41 | { |
| 69 | int oldval; | 42 | int oldval; |
| 70 | unsigned long tmp1, tmp2; | 43 | unsigned long tmp1, tmp2; |
| @@ -78,7 +51,7 @@ static inline int _raw_spin_trylock(spinlock_t *lock) | |||
| 78 | * } | 51 | * } |
| 79 | */ | 52 | */ |
| 80 | __asm__ __volatile__ ( | 53 | __asm__ __volatile__ ( |
| 81 | "# spin_trylock \n\t" | 54 | "# __raw_spin_trylock \n\t" |
| 82 | "ldi %1, #0; \n\t" | 55 | "ldi %1, #0; \n\t" |
| 83 | "mvfc %2, psw; \n\t" | 56 | "mvfc %2, psw; \n\t" |
| 84 | "clrpsw #0x40 -> nop; \n\t" | 57 | "clrpsw #0x40 -> nop; \n\t" |
| @@ -97,16 +70,10 @@ static inline int _raw_spin_trylock(spinlock_t *lock) | |||
| 97 | return (oldval > 0); | 70 | return (oldval > 0); |
| 98 | } | 71 | } |
| 99 | 72 | ||
| 100 | static inline void _raw_spin_lock(spinlock_t *lock) | 73 | static inline void __raw_spin_lock(raw_spinlock_t *lock) |
| 101 | { | 74 | { |
| 102 | unsigned long tmp0, tmp1; | 75 | unsigned long tmp0, tmp1; |
| 103 | 76 | ||
| 104 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 105 | if (unlikely(lock->magic != SPINLOCK_MAGIC)) { | ||
| 106 | printk("pc: %p\n", __builtin_return_address(0)); | ||
| 107 | BUG(); | ||
| 108 | } | ||
| 109 | #endif | ||
| 110 | /* | 77 | /* |
| 111 | * lock->slock : =1 : unlock | 78 | * lock->slock : =1 : unlock |
| 112 | * : <=0 : lock | 79 | * : <=0 : lock |
| @@ -118,7 +85,7 @@ static inline void _raw_spin_lock(spinlock_t *lock) | |||
| 118 | * } | 85 | * } |
| 119 | */ | 86 | */ |
| 120 | __asm__ __volatile__ ( | 87 | __asm__ __volatile__ ( |
| 121 | "# spin_lock \n\t" | 88 | "# __raw_spin_lock \n\t" |
| 122 | ".fillinsn \n" | 89 | ".fillinsn \n" |
| 123 | "1: \n\t" | 90 | "1: \n\t" |
| 124 | "mvfc %1, psw; \n\t" | 91 | "mvfc %1, psw; \n\t" |
| @@ -145,12 +112,8 @@ static inline void _raw_spin_lock(spinlock_t *lock) | |||
| 145 | ); | 112 | ); |
| 146 | } | 113 | } |
| 147 | 114 | ||
| 148 | static inline void _raw_spin_unlock(spinlock_t *lock) | 115 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
| 149 | { | 116 | { |
| 150 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 151 | BUG_ON(lock->magic != SPINLOCK_MAGIC); | ||
| 152 | BUG_ON(!spin_is_locked(lock)); | ||
| 153 | #endif | ||
| 154 | mb(); | 117 | mb(); |
| 155 | lock->slock = 1; | 118 | lock->slock = 1; |
| 156 | } | 119 | } |
| @@ -164,59 +127,32 @@ static inline void _raw_spin_unlock(spinlock_t *lock) | |||
| 164 | * can "mix" irq-safe locks - any writer needs to get a | 127 | * can "mix" irq-safe locks - any writer needs to get a |
| 165 | * irq-safe write-lock, but readers can get non-irqsafe | 128 | * irq-safe write-lock, but readers can get non-irqsafe |
| 166 | * read-locks. | 129 | * read-locks. |
| 130 | * | ||
| 131 | * On x86, we implement read-write locks as a 32-bit counter | ||
| 132 | * with the high bit (sign) being the "contended" bit. | ||
| 133 | * | ||
| 134 | * The inline assembly is non-obvious. Think about it. | ||
| 135 | * | ||
| 136 | * Changed to use the same technique as rw semaphores. See | ||
| 137 | * semaphore.h for details. -ben | ||
| 167 | */ | 138 | */ |
| 168 | typedef struct { | ||
| 169 | volatile int lock; | ||
| 170 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 171 | unsigned magic; | ||
| 172 | #endif | ||
| 173 | #ifdef CONFIG_PREEMPT | ||
| 174 | unsigned int break_lock; | ||
| 175 | #endif | ||
| 176 | } rwlock_t; | ||
| 177 | |||
| 178 | #define RWLOCK_MAGIC 0xdeaf1eed | ||
| 179 | |||
| 180 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 181 | #define RWLOCK_MAGIC_INIT , RWLOCK_MAGIC | ||
| 182 | #else | ||
| 183 | #define RWLOCK_MAGIC_INIT /* */ | ||
| 184 | #endif | ||
| 185 | |||
| 186 | #define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT } | ||
| 187 | |||
| 188 | #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0) | ||
| 189 | 139 | ||
| 190 | /** | 140 | /** |
| 191 | * read_can_lock - would read_trylock() succeed? | 141 | * read_can_lock - would read_trylock() succeed? |
| 192 | * @lock: the rwlock in question. | 142 | * @lock: the rwlock in question. |
| 193 | */ | 143 | */ |
| 194 | #define read_can_lock(x) ((int)(x)->lock > 0) | 144 | #define __raw_read_can_lock(x) ((int)(x)->lock > 0) |
| 195 | 145 | ||
| 196 | /** | 146 | /** |
| 197 | * write_can_lock - would write_trylock() succeed? | 147 | * write_can_lock - would write_trylock() succeed? |
| 198 | * @lock: the rwlock in question. | 148 | * @lock: the rwlock in question. |
| 199 | */ | 149 | */ |
| 200 | #define write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) | 150 | #define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) |
| 201 | |||
| 202 | /* | ||
| 203 | * On x86, we implement read-write locks as a 32-bit counter | ||
| 204 | * with the high bit (sign) being the "contended" bit. | ||
| 205 | * | ||
| 206 | * The inline assembly is non-obvious. Think about it. | ||
| 207 | * | ||
| 208 | * Changed to use the same technique as rw semaphores. See | ||
| 209 | * semaphore.h for details. -ben | ||
| 210 | */ | ||
| 211 | /* the spinlock helpers are in arch/i386/kernel/semaphore.c */ | ||
| 212 | 151 | ||
| 213 | static inline void _raw_read_lock(rwlock_t *rw) | 152 | static inline void __raw_read_lock(raw_rwlock_t *rw) |
| 214 | { | 153 | { |
| 215 | unsigned long tmp0, tmp1; | 154 | unsigned long tmp0, tmp1; |
| 216 | 155 | ||
| 217 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 218 | BUG_ON(rw->magic != RWLOCK_MAGIC); | ||
| 219 | #endif | ||
| 220 | /* | 156 | /* |
| 221 | * rw->lock : >0 : unlock | 157 | * rw->lock : >0 : unlock |
| 222 | * : <=0 : lock | 158 | * : <=0 : lock |
| @@ -264,13 +200,10 @@ static inline void _raw_read_lock(rwlock_t *rw) | |||
| 264 | ); | 200 | ); |
| 265 | } | 201 | } |
| 266 | 202 | ||
| 267 | static inline void _raw_write_lock(rwlock_t *rw) | 203 | static inline void __raw_write_lock(raw_rwlock_t *rw) |
| 268 | { | 204 | { |
| 269 | unsigned long tmp0, tmp1, tmp2; | 205 | unsigned long tmp0, tmp1, tmp2; |
| 270 | 206 | ||
| 271 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 272 | BUG_ON(rw->magic != RWLOCK_MAGIC); | ||
| 273 | #endif | ||
| 274 | /* | 207 | /* |
| 275 | * rw->lock : =RW_LOCK_BIAS_STR : unlock | 208 | * rw->lock : =RW_LOCK_BIAS_STR : unlock |
| 276 | * : !=RW_LOCK_BIAS_STR : lock | 209 | * : !=RW_LOCK_BIAS_STR : lock |
| @@ -320,7 +253,7 @@ static inline void _raw_write_lock(rwlock_t *rw) | |||
| 320 | ); | 253 | ); |
| 321 | } | 254 | } |
| 322 | 255 | ||
| 323 | static inline void _raw_read_unlock(rwlock_t *rw) | 256 | static inline void __raw_read_unlock(raw_rwlock_t *rw) |
| 324 | { | 257 | { |
| 325 | unsigned long tmp0, tmp1; | 258 | unsigned long tmp0, tmp1; |
| 326 | 259 | ||
| @@ -342,7 +275,7 @@ static inline void _raw_read_unlock(rwlock_t *rw) | |||
| 342 | ); | 275 | ); |
| 343 | } | 276 | } |
| 344 | 277 | ||
| 345 | static inline void _raw_write_unlock(rwlock_t *rw) | 278 | static inline void __raw_write_unlock(raw_rwlock_t *rw) |
| 346 | { | 279 | { |
| 347 | unsigned long tmp0, tmp1, tmp2; | 280 | unsigned long tmp0, tmp1, tmp2; |
| 348 | 281 | ||
| @@ -366,9 +299,9 @@ static inline void _raw_write_unlock(rwlock_t *rw) | |||
| 366 | ); | 299 | ); |
| 367 | } | 300 | } |
| 368 | 301 | ||
| 369 | #define _raw_read_trylock(lock) generic_raw_read_trylock(lock) | 302 | #define __raw_read_trylock(lock) generic__raw_read_trylock(lock) |
| 370 | 303 | ||
| 371 | static inline int _raw_write_trylock(rwlock_t *lock) | 304 | static inline int __raw_write_trylock(raw_rwlock_t *lock) |
| 372 | { | 305 | { |
| 373 | atomic_t *count = (atomic_t *)lock; | 306 | atomic_t *count = (atomic_t *)lock; |
| 374 | if (atomic_sub_and_test(RW_LOCK_BIAS, count)) | 307 | if (atomic_sub_and_test(RW_LOCK_BIAS, count)) |
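The m32r write_trylock above follows the same bias pattern as the i386 version earlier in this diff. For orientation, a sketch of the complete shape (not the verbatim m32r source):

static inline int sketch_write_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;			/* counter hit zero: lock taken */
	atomic_add(RW_LOCK_BIAS, count);	/* contended: restore the bias */
	return 0;
}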
diff --git a/include/asm-m32r/spinlock_types.h b/include/asm-m32r/spinlock_types.h
new file mode 100644
index 000000000000..7e9941c45f40
--- /dev/null
+++ b/include/asm-m32r/spinlock_types.h
| @@ -0,0 +1,23 @@ | |||
| 1 | #ifndef _ASM_M32R_SPINLOCK_TYPES_H | ||
| 2 | #define _ASM_M32R_SPINLOCK_TYPES_H | ||
| 3 | |||
| 4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
| 5 | # error "please don't include this file directly" | ||
| 6 | #endif | ||
| 7 | |||
| 8 | typedef struct { | ||
| 9 | volatile int slock; | ||
| 10 | } raw_spinlock_t; | ||
| 11 | |||
| 12 | #define __RAW_SPIN_LOCK_UNLOCKED { 1 } | ||
| 13 | |||
| 14 | typedef struct { | ||
| 15 | volatile int lock; | ||
| 16 | } raw_rwlock_t; | ||
| 17 | |||
| 18 | #define RW_LOCK_BIAS 0x01000000 | ||
| 19 | #define RW_LOCK_BIAS_STR "0x01000000" | ||
| 20 | |||
| 21 | #define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } | ||
| 22 | |||
| 23 | #endif | ||
diff --git a/include/asm-mips/spinlock.h b/include/asm-mips/spinlock.h
index 114d3eb98a6a..4d0135b11156 100644
--- a/include/asm-mips/spinlock.h
+++ b/include/asm-mips/spinlock.h
| @@ -16,20 +16,10 @@ | |||
| 16 | * Your basic SMP spinlocks, allowing only a single CPU anywhere | 16 | * Your basic SMP spinlocks, allowing only a single CPU anywhere |
| 17 | */ | 17 | */ |
| 18 | 18 | ||
| 19 | typedef struct { | 19 | #define __raw_spin_is_locked(x) ((x)->lock != 0) |
| 20 | volatile unsigned int lock; | 20 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
| 21 | #ifdef CONFIG_PREEMPT | 21 | #define __raw_spin_unlock_wait(x) \ |
| 22 | unsigned int break_lock; | 22 | do { cpu_relax(); } while ((x)->lock) |
| 23 | #endif | ||
| 24 | } spinlock_t; | ||
| 25 | |||
| 26 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } | ||
| 27 | |||
| 28 | #define spin_lock_init(x) do { (x)->lock = 0; } while(0) | ||
| 29 | |||
| 30 | #define spin_is_locked(x) ((x)->lock != 0) | ||
| 31 | #define spin_unlock_wait(x) do { barrier(); } while ((x)->lock) | ||
| 32 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | ||
| 33 | 23 | ||
| 34 | /* | 24 | /* |
| 35 | * Simple spin lock operations. There are two variants, one clears IRQ's | 25 | * Simple spin lock operations. There are two variants, one clears IRQ's |
| @@ -38,13 +28,13 @@ typedef struct { | |||
| 38 | * We make no fairness assumptions. They have a cost. | 28 | * We make no fairness assumptions. They have a cost. |
| 39 | */ | 29 | */ |
| 40 | 30 | ||
| 41 | static inline void _raw_spin_lock(spinlock_t *lock) | 31 | static inline void __raw_spin_lock(raw_spinlock_t *lock) |
| 42 | { | 32 | { |
| 43 | unsigned int tmp; | 33 | unsigned int tmp; |
| 44 | 34 | ||
| 45 | if (R10000_LLSC_WAR) { | 35 | if (R10000_LLSC_WAR) { |
| 46 | __asm__ __volatile__( | 36 | __asm__ __volatile__( |
| 47 | " .set noreorder # _raw_spin_lock \n" | 37 | " .set noreorder # __raw_spin_lock \n" |
| 48 | "1: ll %1, %2 \n" | 38 | "1: ll %1, %2 \n" |
| 49 | " bnez %1, 1b \n" | 39 | " bnez %1, 1b \n" |
| 50 | " li %1, 1 \n" | 40 | " li %1, 1 \n" |
| @@ -58,7 +48,7 @@ static inline void _raw_spin_lock(spinlock_t *lock) | |||
| 58 | : "memory"); | 48 | : "memory"); |
| 59 | } else { | 49 | } else { |
| 60 | __asm__ __volatile__( | 50 | __asm__ __volatile__( |
| 61 | " .set noreorder # _raw_spin_lock \n" | 51 | " .set noreorder # __raw_spin_lock \n" |
| 62 | "1: ll %1, %2 \n" | 52 | "1: ll %1, %2 \n" |
| 63 | " bnez %1, 1b \n" | 53 | " bnez %1, 1b \n" |
| 64 | " li %1, 1 \n" | 54 | " li %1, 1 \n" |
| @@ -72,10 +62,10 @@ static inline void _raw_spin_lock(spinlock_t *lock) | |||
| 72 | } | 62 | } |
| 73 | } | 63 | } |
| 74 | 64 | ||
| 75 | static inline void _raw_spin_unlock(spinlock_t *lock) | 65 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
| 76 | { | 66 | { |
| 77 | __asm__ __volatile__( | 67 | __asm__ __volatile__( |
| 78 | " .set noreorder # _raw_spin_unlock \n" | 68 | " .set noreorder # __raw_spin_unlock \n" |
| 79 | " sync \n" | 69 | " sync \n" |
| 80 | " sw $0, %0 \n" | 70 | " sw $0, %0 \n" |
| 81 | " .set\treorder \n" | 71 | " .set\treorder \n" |
| @@ -84,13 +74,13 @@ static inline void _raw_spin_unlock(spinlock_t *lock) | |||
| 84 | : "memory"); | 74 | : "memory"); |
| 85 | } | 75 | } |
| 86 | 76 | ||
| 87 | static inline unsigned int _raw_spin_trylock(spinlock_t *lock) | 77 | static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock) |
| 88 | { | 78 | { |
| 89 | unsigned int temp, res; | 79 | unsigned int temp, res; |
| 90 | 80 | ||
| 91 | if (R10000_LLSC_WAR) { | 81 | if (R10000_LLSC_WAR) { |
| 92 | __asm__ __volatile__( | 82 | __asm__ __volatile__( |
| 93 | " .set noreorder # _raw_spin_trylock \n" | 83 | " .set noreorder # __raw_spin_trylock \n" |
| 94 | "1: ll %0, %3 \n" | 84 | "1: ll %0, %3 \n" |
| 95 | " ori %2, %0, 1 \n" | 85 | " ori %2, %0, 1 \n" |
| 96 | " sc %2, %1 \n" | 86 | " sc %2, %1 \n" |
| @@ -104,7 +94,7 @@ static inline unsigned int _raw_spin_trylock(spinlock_t *lock) | |||
| 104 | : "memory"); | 94 | : "memory"); |
| 105 | } else { | 95 | } else { |
| 106 | __asm__ __volatile__( | 96 | __asm__ __volatile__( |
| 107 | " .set noreorder # _raw_spin_trylock \n" | 97 | " .set noreorder # __raw_spin_trylock \n" |
| 108 | "1: ll %0, %3 \n" | 98 | "1: ll %0, %3 \n" |
| 109 | " ori %2, %0, 1 \n" | 99 | " ori %2, %0, 1 \n" |
| 110 | " sc %2, %1 \n" | 100 | " sc %2, %1 \n" |
| @@ -129,24 +119,13 @@ static inline unsigned int _raw_spin_trylock(spinlock_t *lock) | |||
| 129 | * read-locks. | 119 | * read-locks. |
| 130 | */ | 120 | */ |
| 131 | 121 | ||
| 132 | typedef struct { | 122 | static inline void __raw_read_lock(raw_rwlock_t *rw) |
| 133 | volatile unsigned int lock; | ||
| 134 | #ifdef CONFIG_PREEMPT | ||
| 135 | unsigned int break_lock; | ||
| 136 | #endif | ||
| 137 | } rwlock_t; | ||
| 138 | |||
| 139 | #define RW_LOCK_UNLOCKED (rwlock_t) { 0 } | ||
| 140 | |||
| 141 | #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0) | ||
| 142 | |||
| 143 | static inline void _raw_read_lock(rwlock_t *rw) | ||
| 144 | { | 123 | { |
| 145 | unsigned int tmp; | 124 | unsigned int tmp; |
| 146 | 125 | ||
| 147 | if (R10000_LLSC_WAR) { | 126 | if (R10000_LLSC_WAR) { |
| 148 | __asm__ __volatile__( | 127 | __asm__ __volatile__( |
| 149 | " .set noreorder # _raw_read_lock \n" | 128 | " .set noreorder # __raw_read_lock \n" |
| 150 | "1: ll %1, %2 \n" | 129 | "1: ll %1, %2 \n" |
| 151 | " bltz %1, 1b \n" | 130 | " bltz %1, 1b \n" |
| 152 | " addu %1, 1 \n" | 131 | " addu %1, 1 \n" |
| @@ -160,7 +139,7 @@ static inline void _raw_read_lock(rwlock_t *rw) | |||
| 160 | : "memory"); | 139 | : "memory"); |
| 161 | } else { | 140 | } else { |
| 162 | __asm__ __volatile__( | 141 | __asm__ __volatile__( |
| 163 | " .set noreorder # _raw_read_lock \n" | 142 | " .set noreorder # __raw_read_lock \n" |
| 164 | "1: ll %1, %2 \n" | 143 | "1: ll %1, %2 \n" |
| 165 | " bltz %1, 1b \n" | 144 | " bltz %1, 1b \n" |
| 166 | " addu %1, 1 \n" | 145 | " addu %1, 1 \n" |
| @@ -177,13 +156,13 @@ static inline void _raw_read_lock(rwlock_t *rw) | |||
| 177 | /* Note the use of sub, not subu which will make the kernel die with an | 156 | /* Note the use of sub, not subu which will make the kernel die with an |
| 178 | overflow exception if we ever try to unlock an rwlock that is already | 157 | overflow exception if we ever try to unlock an rwlock that is already |
| 179 | unlocked or is being held by a writer. */ | 158 | unlocked or is being held by a writer. */ |
| 180 | static inline void _raw_read_unlock(rwlock_t *rw) | 159 | static inline void __raw_read_unlock(raw_rwlock_t *rw) |
| 181 | { | 160 | { |
| 182 | unsigned int tmp; | 161 | unsigned int tmp; |
| 183 | 162 | ||
| 184 | if (R10000_LLSC_WAR) { | 163 | if (R10000_LLSC_WAR) { |
| 185 | __asm__ __volatile__( | 164 | __asm__ __volatile__( |
| 186 | "1: ll %1, %2 # _raw_read_unlock \n" | 165 | "1: ll %1, %2 # __raw_read_unlock \n" |
| 187 | " sub %1, 1 \n" | 166 | " sub %1, 1 \n" |
| 188 | " sc %1, %0 \n" | 167 | " sc %1, %0 \n" |
| 189 | " beqzl %1, 1b \n" | 168 | " beqzl %1, 1b \n" |
| @@ -193,7 +172,7 @@ static inline void _raw_read_unlock(rwlock_t *rw) | |||
| 193 | : "memory"); | 172 | : "memory"); |
| 194 | } else { | 173 | } else { |
| 195 | __asm__ __volatile__( | 174 | __asm__ __volatile__( |
| 196 | " .set noreorder # _raw_read_unlock \n" | 175 | " .set noreorder # __raw_read_unlock \n" |
| 197 | "1: ll %1, %2 \n" | 176 | "1: ll %1, %2 \n" |
| 198 | " sub %1, 1 \n" | 177 | " sub %1, 1 \n" |
| 199 | " sc %1, %0 \n" | 178 | " sc %1, %0 \n" |
| @@ -206,13 +185,13 @@ static inline void _raw_read_unlock(rwlock_t *rw) | |||
| 206 | } | 185 | } |
| 207 | } | 186 | } |
| 208 | 187 | ||
| 209 | static inline void _raw_write_lock(rwlock_t *rw) | 188 | static inline void __raw_write_lock(raw_rwlock_t *rw) |
| 210 | { | 189 | { |
| 211 | unsigned int tmp; | 190 | unsigned int tmp; |
| 212 | 191 | ||
| 213 | if (R10000_LLSC_WAR) { | 192 | if (R10000_LLSC_WAR) { |
| 214 | __asm__ __volatile__( | 193 | __asm__ __volatile__( |
| 215 | " .set noreorder # _raw_write_lock \n" | 194 | " .set noreorder # __raw_write_lock \n" |
| 216 | "1: ll %1, %2 \n" | 195 | "1: ll %1, %2 \n" |
| 217 | " bnez %1, 1b \n" | 196 | " bnez %1, 1b \n" |
| 218 | " lui %1, 0x8000 \n" | 197 | " lui %1, 0x8000 \n" |
| @@ -226,7 +205,7 @@ static inline void _raw_write_lock(rwlock_t *rw) | |||
| 226 | : "memory"); | 205 | : "memory"); |
| 227 | } else { | 206 | } else { |
| 228 | __asm__ __volatile__( | 207 | __asm__ __volatile__( |
| 229 | " .set noreorder # _raw_write_lock \n" | 208 | " .set noreorder # __raw_write_lock \n" |
| 230 | "1: ll %1, %2 \n" | 209 | "1: ll %1, %2 \n" |
| 231 | " bnez %1, 1b \n" | 210 | " bnez %1, 1b \n" |
| 232 | " lui %1, 0x8000 \n" | 211 | " lui %1, 0x8000 \n" |
| @@ -241,26 +220,26 @@ static inline void _raw_write_lock(rwlock_t *rw) | |||
| 241 | } | 220 | } |
| 242 | } | 221 | } |
| 243 | 222 | ||
| 244 | static inline void _raw_write_unlock(rwlock_t *rw) | 223 | static inline void __raw_write_unlock(raw_rwlock_t *rw) |
| 245 | { | 224 | { |
| 246 | __asm__ __volatile__( | 225 | __asm__ __volatile__( |
| 247 | " sync # _raw_write_unlock \n" | 226 | " sync # __raw_write_unlock \n" |
| 248 | " sw $0, %0 \n" | 227 | " sw $0, %0 \n" |
| 249 | : "=m" (rw->lock) | 228 | : "=m" (rw->lock) |
| 250 | : "m" (rw->lock) | 229 | : "m" (rw->lock) |
| 251 | : "memory"); | 230 | : "memory"); |
| 252 | } | 231 | } |
| 253 | 232 | ||
| 254 | #define _raw_read_trylock(lock) generic_raw_read_trylock(lock) | 233 | #define __raw_read_trylock(lock) generic__raw_read_trylock(lock) |
| 255 | 234 | ||
| 256 | static inline int _raw_write_trylock(rwlock_t *rw) | 235 | static inline int __raw_write_trylock(raw_rwlock_t *rw) |
| 257 | { | 236 | { |
| 258 | unsigned int tmp; | 237 | unsigned int tmp; |
| 259 | int ret; | 238 | int ret; |
| 260 | 239 | ||
| 261 | if (R10000_LLSC_WAR) { | 240 | if (R10000_LLSC_WAR) { |
| 262 | __asm__ __volatile__( | 241 | __asm__ __volatile__( |
| 263 | " .set noreorder # _raw_write_trylock \n" | 242 | " .set noreorder # __raw_write_trylock \n" |
| 264 | " li %2, 0 \n" | 243 | " li %2, 0 \n" |
| 265 | "1: ll %1, %3 \n" | 244 | "1: ll %1, %3 \n" |
| 266 | " bnez %1, 2f \n" | 245 | " bnez %1, 2f \n" |
| @@ -277,7 +256,7 @@ static inline int _raw_write_trylock(rwlock_t *rw) | |||
| 277 | : "memory"); | 256 | : "memory"); |
| 278 | } else { | 257 | } else { |
| 279 | __asm__ __volatile__( | 258 | __asm__ __volatile__( |
| 280 | " .set noreorder # _raw_write_trylock \n" | 259 | " .set noreorder # __raw_write_trylock \n" |
| 281 | " li %2, 0 \n" | 260 | " li %2, 0 \n" |
| 282 | "1: ll %1, %3 \n" | 261 | "1: ll %1, %3 \n" |
| 283 | " bnez %1, 2f \n" | 262 | " bnez %1, 2f \n" |
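
Both MIPS variants above are the same ll/sc retry loop; the R10000_LLSC_WAR branch only swaps in the branch-likely form needed by early R10000 silicon. The acquire loop can be sketched portably with a compare-and-swap standing in for ll/sc (C11 atomics; the demo_* names are invented, not kernel API):

    /* Portable analogue of the ll/sc spin loop (illustration, not kernel code). */
    #include <stdatomic.h>

    typedef struct { atomic_uint lock; } demo_spinlock_t;

    static void demo_spin_lock(demo_spinlock_t *l)
    {
            unsigned int expected;
            do {
                    expected = 0;   /* "ll": observe the lock word as free */
                    /* "sc": store 1 only if nothing intervened since the load */
            } while (!atomic_compare_exchange_weak_explicit(&l->lock, &expected, 1,
                                                            memory_order_acquire,
                                                            memory_order_relaxed));
    }

    static void demo_spin_unlock(demo_spinlock_t *l)
    {
            /* The MIPS unlock issues "sync" then stores 0; a release store
             * expresses both at once. */
            atomic_store_explicit(&l->lock, 0, memory_order_release);
    }
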
diff --git a/include/asm-mips/spinlock_types.h b/include/asm-mips/spinlock_types.h new file mode 100644 index 000000000000..ce26c5048b15 --- /dev/null +++ b/include/asm-mips/spinlock_types.h | |||
| @@ -0,0 +1,20 @@ | |||
| 1 | #ifndef _ASM_SPINLOCK_TYPES_H | ||
| 2 | #define _ASM_SPINLOCK_TYPES_H | ||
| 3 | |||
| 4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
| 5 | # error "please don't include this file directly" | ||
| 6 | #endif | ||
| 7 | |||
| 8 | typedef struct { | ||
| 9 | volatile unsigned int lock; | ||
| 10 | } raw_spinlock_t; | ||
| 11 | |||
| 12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | ||
| 13 | |||
| 14 | typedef struct { | ||
| 15 | volatile unsigned int lock; | ||
| 16 | } raw_rwlock_t; | ||
| 17 | |||
| 18 | #define __RAW_RW_LOCK_UNLOCKED { 0 } | ||
| 19 | |||
| 20 | #endif | ||
diff --git a/include/asm-parisc/atomic.h b/include/asm-parisc/atomic.h index e24f7579adb0..048a2c7fd0c0 100644 --- a/include/asm-parisc/atomic.h +++ b/include/asm-parisc/atomic.h | |||
| @@ -24,19 +24,19 @@ | |||
| 24 | # define ATOMIC_HASH_SIZE 4 | 24 | # define ATOMIC_HASH_SIZE 4 |
| 25 | # define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ])) | 25 | # define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ])) |
| 26 | 26 | ||
| 27 | extern spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; | 27 | extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; |
| 28 | 28 | ||
| 29 | /* Can't use _raw_spin_lock_irq because of #include problems, so | 29 | /* Can't use raw_spin_lock_irq because of #include problems, so |
| 30 | * this is the substitute */ | 30 | * this is the substitute */ |
| 31 | #define _atomic_spin_lock_irqsave(l,f) do { \ | 31 | #define _atomic_spin_lock_irqsave(l,f) do { \ |
| 32 | spinlock_t *s = ATOMIC_HASH(l); \ | 32 | raw_spinlock_t *s = ATOMIC_HASH(l); \ |
| 33 | local_irq_save(f); \ | 33 | local_irq_save(f); \ |
| 34 | _raw_spin_lock(s); \ | 34 | __raw_spin_lock(s); \ |
| 35 | } while(0) | 35 | } while(0) |
| 36 | 36 | ||
| 37 | #define _atomic_spin_unlock_irqrestore(l,f) do { \ | 37 | #define _atomic_spin_unlock_irqrestore(l,f) do { \ |
| 38 | spinlock_t *s = ATOMIC_HASH(l); \ | 38 | raw_spinlock_t *s = ATOMIC_HASH(l); \ |
| 39 | _raw_spin_unlock(s); \ | 39 | __raw_spin_unlock(s); \ |
| 40 | local_irq_restore(f); \ | 40 | local_irq_restore(f); \ |
| 41 | } while(0) | 41 | } while(0) |
| 42 | 42 | ||
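
The hunk above is the parisc strategy for atomic_t in one picture: with only load-and-zero available, atomic operations are serialized through a small array of spinlocks hashed by address. A rough userspace model of that hashing, with pthread mutexes standing in for raw_spinlock_t and invented demo_* names:

    /* Hashed-lock atomics in the style of __atomic_hash (illustration only). */
    #include <pthread.h>
    #include <stdint.h>

    #define DEMO_HASH_SIZE  4
    #define DEMO_LINE_BYTES 64          /* stand-in for L1_CACHE_BYTES */

    static pthread_mutex_t demo_hash[DEMO_HASH_SIZE] = {
            PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
            PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    };

    /* Index by cache line so nearby words share a lock and distant ones
     * rarely do. */
    static pthread_mutex_t *demo_lock_for(const void *addr)
    {
            return &demo_hash[((uintptr_t)addr / DEMO_LINE_BYTES)
                              & (DEMO_HASH_SIZE - 1)];
    }

    static int demo_atomic_add_return(int i, int *v)
    {
            pthread_mutex_t *m = demo_lock_for(v);
            int ret;

            pthread_mutex_lock(m);
            ret = (*v += i);
            pthread_mutex_unlock(m);
            return ret;
    }
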
diff --git a/include/asm-parisc/bitops.h b/include/asm-parisc/bitops.h index 928e5ef850bd..af7db694b22d 100644 --- a/include/asm-parisc/bitops.h +++ b/include/asm-parisc/bitops.h | |||
| @@ -2,7 +2,7 @@ | |||
| 2 | #define _PARISC_BITOPS_H | 2 | #define _PARISC_BITOPS_H |
| 3 | 3 | ||
| 4 | #include <linux/compiler.h> | 4 | #include <linux/compiler.h> |
| 5 | #include <asm/system.h> | 5 | #include <asm/spinlock.h> |
| 6 | #include <asm/byteorder.h> | 6 | #include <asm/byteorder.h> |
| 7 | #include <asm/atomic.h> | 7 | #include <asm/atomic.h> |
| 8 | 8 | ||
diff --git a/include/asm-parisc/cacheflush.h b/include/asm-parisc/cacheflush.h index 06732719d927..aa592d8c0e39 100644 --- a/include/asm-parisc/cacheflush.h +++ b/include/asm-parisc/cacheflush.h | |||
| @@ -3,6 +3,7 @@ | |||
| 3 | 3 | ||
| 4 | #include <linux/config.h> | 4 | #include <linux/config.h> |
| 5 | #include <linux/mm.h> | 5 | #include <linux/mm.h> |
| 6 | #include <asm/cache.h> /* for flush_user_dcache_range_asm() proto */ | ||
| 6 | 7 | ||
| 7 | /* The usual comment is "Caches aren't brain-dead on the <architecture>". | 8 | /* The usual comment is "Caches aren't brain-dead on the <architecture>". |
| 8 | * Unfortunately, that doesn't apply to PA-RISC. */ | 9 | * Unfortunately, that doesn't apply to PA-RISC. */ |
diff --git a/include/asm-parisc/processor.h b/include/asm-parisc/processor.h index 0b61f51d8467..a9dfadd05658 100644 --- a/include/asm-parisc/processor.h +++ b/include/asm-parisc/processor.h | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | #ifndef __ASSEMBLY__ | 11 | #ifndef __ASSEMBLY__ |
| 12 | #include <linux/config.h> | 12 | #include <linux/config.h> |
| 13 | #include <linux/threads.h> | 13 | #include <linux/threads.h> |
| 14 | #include <linux/spinlock_types.h> | ||
| 14 | 15 | ||
| 15 | #include <asm/hardware.h> | 16 | #include <asm/hardware.h> |
| 16 | #include <asm/page.h> | 17 | #include <asm/page.h> |
diff --git a/include/asm-parisc/spinlock.h b/include/asm-parisc/spinlock.h index 679ea1c651ef..43eaa6e742e0 100644 --- a/include/asm-parisc/spinlock.h +++ b/include/asm-parisc/spinlock.h | |||
| @@ -2,30 +2,25 @@ | |||
| 2 | #define __ASM_SPINLOCK_H | 2 | #define __ASM_SPINLOCK_H |
| 3 | 3 | ||
| 4 | #include <asm/system.h> | 4 | #include <asm/system.h> |
| 5 | #include <asm/processor.h> | ||
| 6 | #include <asm/spinlock_types.h> | ||
| 5 | 7 | ||
| 6 | /* Note that PA-RISC has to use `1' to mean unlocked and `0' to mean locked | 8 | /* Note that PA-RISC has to use `1' to mean unlocked and `0' to mean locked |
| 7 | * since it only has load-and-zero. Moreover, at least on some PA processors, | 9 | * since it only has load-and-zero. Moreover, at least on some PA processors, |
| 8 | * the semaphore address has to be 16-byte aligned. | 10 | * the semaphore address has to be 16-byte aligned. |
| 9 | */ | 11 | */ |
| 10 | 12 | ||
| 11 | #ifndef CONFIG_DEBUG_SPINLOCK | 13 | static inline int __raw_spin_is_locked(raw_spinlock_t *x) |
| 12 | |||
| 13 | #define __SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } } | ||
| 14 | #undef SPIN_LOCK_UNLOCKED | ||
| 15 | #define SPIN_LOCK_UNLOCKED (spinlock_t) __SPIN_LOCK_UNLOCKED | ||
| 16 | |||
| 17 | #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0) | ||
| 18 | |||
| 19 | static inline int spin_is_locked(spinlock_t *x) | ||
| 20 | { | 14 | { |
| 21 | volatile unsigned int *a = __ldcw_align(x); | 15 | volatile unsigned int *a = __ldcw_align(x); |
| 22 | return *a == 0; | 16 | return *a == 0; |
| 23 | } | 17 | } |
| 24 | 18 | ||
| 25 | #define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x)) | 19 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
| 26 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | 20 | #define __raw_spin_unlock_wait(x) \ |
| 21 | do { cpu_relax(); } while (__raw_spin_is_locked(x)) | ||
| 27 | 22 | ||
| 28 | static inline void _raw_spin_lock(spinlock_t *x) | 23 | static inline void __raw_spin_lock(raw_spinlock_t *x) |
| 29 | { | 24 | { |
| 30 | volatile unsigned int *a; | 25 | volatile unsigned int *a; |
| 31 | 26 | ||
| @@ -36,7 +31,7 @@ static inline void _raw_spin_lock(spinlock_t *x) | |||
| 36 | mb(); | 31 | mb(); |
| 37 | } | 32 | } |
| 38 | 33 | ||
| 39 | static inline void _raw_spin_unlock(spinlock_t *x) | 34 | static inline void __raw_spin_unlock(raw_spinlock_t *x) |
| 40 | { | 35 | { |
| 41 | volatile unsigned int *a; | 36 | volatile unsigned int *a; |
| 42 | mb(); | 37 | mb(); |
| @@ -45,7 +40,7 @@ static inline void _raw_spin_unlock(spinlock_t *x) | |||
| 45 | mb(); | 40 | mb(); |
| 46 | } | 41 | } |
| 47 | 42 | ||
| 48 | static inline int _raw_spin_trylock(spinlock_t *x) | 43 | static inline int __raw_spin_trylock(raw_spinlock_t *x) |
| 49 | { | 44 | { |
| 50 | volatile unsigned int *a; | 45 | volatile unsigned int *a; |
| 51 | int ret; | 46 | int ret; |
| @@ -57,131 +52,38 @@ static inline int _raw_spin_trylock(spinlock_t *x) | |||
| 57 | 52 | ||
| 58 | return ret; | 53 | return ret; |
| 59 | } | 54 | } |
| 60 | |||
| 61 | #define spin_lock_own(LOCK, LOCATION) ((void)0) | ||
| 62 | |||
| 63 | #else /* !(CONFIG_DEBUG_SPINLOCK) */ | ||
| 64 | |||
| 65 | #define SPINLOCK_MAGIC 0x1D244B3C | ||
| 66 | |||
| 67 | #define __SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 }, SPINLOCK_MAGIC, 10, __FILE__ , NULL, 0, -1, NULL, NULL } | ||
| 68 | #undef SPIN_LOCK_UNLOCKED | ||
| 69 | #define SPIN_LOCK_UNLOCKED (spinlock_t) __SPIN_LOCK_UNLOCKED | ||
| 70 | |||
| 71 | #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0) | ||
| 72 | |||
| 73 | #define CHECK_LOCK(x) \ | ||
| 74 | do { \ | ||
| 75 | if (unlikely((x)->magic != SPINLOCK_MAGIC)) { \ | ||
| 76 | printk(KERN_ERR "%s:%d: spin_is_locked" \ | ||
| 77 | " on uninitialized spinlock %p.\n", \ | ||
| 78 | __FILE__, __LINE__, (x)); \ | ||
| 79 | } \ | ||
| 80 | } while(0) | ||
| 81 | |||
| 82 | #define spin_is_locked(x) \ | ||
| 83 | ({ \ | ||
| 84 | CHECK_LOCK(x); \ | ||
| 85 | volatile unsigned int *a = __ldcw_align(x); \ | ||
| 86 | if (unlikely((*a == 0) && (x)->babble)) { \ | ||
| 87 | (x)->babble--; \ | ||
| 88 | printk("KERN_WARNING \ | ||
| 89 | %s:%d: spin_is_locked(%s/%p) already" \ | ||
| 90 | " locked by %s:%d in %s at %p(%d)\n", \ | ||
| 91 | __FILE__,__LINE__, (x)->module, (x), \ | ||
| 92 | (x)->bfile, (x)->bline, (x)->task->comm,\ | ||
| 93 | (x)->previous, (x)->oncpu); \ | ||
| 94 | } \ | ||
| 95 | *a == 0; \ | ||
| 96 | }) | ||
| 97 | |||
| 98 | #define spin_unlock_wait(x) \ | ||
| 99 | do { \ | ||
| 100 | CHECK_LOCK(x); \ | ||
| 101 | volatile unsigned int *a = __ldcw_align(x); \ | ||
| 102 | if (unlikely((*a == 0) && (x)->babble)) { \ | ||
| 103 | (x)->babble--; \ | ||
| 104 | printk("KERN_WARNING \ | ||
| 105 | %s:%d: spin_unlock_wait(%s/%p)" \ | ||
| 106 | " owned by %s:%d in %s at %p(%d)\n", \ | ||
| 107 | __FILE__,__LINE__, (x)->module, (x), \ | ||
| 108 | (x)->bfile, (x)->bline, (x)->task->comm,\ | ||
| 109 | (x)->previous, (x)->oncpu); \ | ||
| 110 | } \ | ||
| 111 | barrier(); \ | ||
| 112 | } while (*((volatile unsigned char *)(__ldcw_align(x))) == 0) | ||
| 113 | |||
| 114 | extern void _dbg_spin_lock(spinlock_t *lock, const char *base_file, int line_no); | ||
| 115 | extern void _dbg_spin_unlock(spinlock_t *lock, const char *, int); | ||
| 116 | extern int _dbg_spin_trylock(spinlock_t * lock, const char *, int); | ||
| 117 | |||
| 118 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | ||
| 119 | |||
| 120 | #define _raw_spin_unlock(lock) _dbg_spin_unlock(lock, __FILE__, __LINE__) | ||
| 121 | #define _raw_spin_lock(lock) _dbg_spin_lock(lock, __FILE__, __LINE__) | ||
| 122 | #define _raw_spin_trylock(lock) _dbg_spin_trylock(lock, __FILE__, __LINE__) | ||
| 123 | |||
| 124 | /* just in case we need it */ | ||
| 125 | #define spin_lock_own(LOCK, LOCATION) \ | ||
| 126 | do { \ | ||
| 127 | volatile unsigned int *a = __ldcw_align(LOCK); \ | ||
| 128 | if (!((*a == 0) && ((LOCK)->oncpu == smp_processor_id()))) \ | ||
| 129 | printk("KERN_WARNING \ | ||
| 130 | %s: called on %d from %p but lock %s on %d\n", \ | ||
| 131 | LOCATION, smp_processor_id(), \ | ||
| 132 | __builtin_return_address(0), \ | ||
| 133 | (*a == 0) ? "taken" : "freed", (LOCK)->on_cpu); \ | ||
| 134 | } while (0) | ||
| 135 | |||
| 136 | #endif /* !(CONFIG_DEBUG_SPINLOCK) */ | ||
| 137 | 55 | ||
| 138 | /* | 56 | /* |
| 139 | * Read-write spinlocks, allowing multiple readers | 57 | * Read-write spinlocks, allowing multiple readers |
| 140 | * but only one writer. | 58 | * but only one writer. |
| 141 | */ | 59 | */ |
| 142 | typedef struct { | ||
| 143 | spinlock_t lock; | ||
| 144 | volatile int counter; | ||
| 145 | #ifdef CONFIG_PREEMPT | ||
| 146 | unsigned int break_lock; | ||
| 147 | #endif | ||
| 148 | } rwlock_t; | ||
| 149 | |||
| 150 | #define RW_LOCK_UNLOCKED (rwlock_t) { __SPIN_LOCK_UNLOCKED, 0 } | ||
| 151 | |||
| 152 | #define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while (0) | ||
| 153 | 60 | ||
| 154 | #define _raw_read_trylock(lock) generic_raw_read_trylock(lock) | 61 | #define __raw_read_trylock(lock) generic__raw_read_trylock(lock) |
| 155 | 62 | ||
| 156 | /* read_lock, read_unlock are pretty straightforward. Of course it somehow | 63 | /* read_lock, read_unlock are pretty straightforward. Of course it somehow |
| 157 | * sucks we end up saving/restoring flags twice for read_lock_irqsave aso. */ | 64 | * sucks we end up saving/restoring flags twice for read_lock_irqsave aso. */ |
| 158 | 65 | ||
| 159 | #ifdef CONFIG_DEBUG_RWLOCK | 66 | static __inline__ void __raw_read_lock(raw_rwlock_t *rw) |
| 160 | extern void _dbg_read_lock(rwlock_t * rw, const char *bfile, int bline); | ||
| 161 | #define _raw_read_lock(rw) _dbg_read_lock(rw, __FILE__, __LINE__) | ||
| 162 | #else | ||
| 163 | static __inline__ void _raw_read_lock(rwlock_t *rw) | ||
| 164 | { | 67 | { |
| 165 | unsigned long flags; | 68 | unsigned long flags; |
| 166 | local_irq_save(flags); | 69 | local_irq_save(flags); |
| 167 | _raw_spin_lock(&rw->lock); | 70 | __raw_spin_lock(&rw->lock); |
| 168 | 71 | ||
| 169 | rw->counter++; | 72 | rw->counter++; |
| 170 | 73 | ||
| 171 | _raw_spin_unlock(&rw->lock); | 74 | __raw_spin_unlock(&rw->lock); |
| 172 | local_irq_restore(flags); | 75 | local_irq_restore(flags); |
| 173 | } | 76 | } |
| 174 | #endif /* CONFIG_DEBUG_RWLOCK */ | ||
| 175 | 77 | ||
| 176 | static __inline__ void _raw_read_unlock(rwlock_t *rw) | 78 | static __inline__ void __raw_read_unlock(raw_rwlock_t *rw) |
| 177 | { | 79 | { |
| 178 | unsigned long flags; | 80 | unsigned long flags; |
| 179 | local_irq_save(flags); | 81 | local_irq_save(flags); |
| 180 | _raw_spin_lock(&rw->lock); | 82 | __raw_spin_lock(&rw->lock); |
| 181 | 83 | ||
| 182 | rw->counter--; | 84 | rw->counter--; |
| 183 | 85 | ||
| 184 | _raw_spin_unlock(&rw->lock); | 86 | __raw_spin_unlock(&rw->lock); |
| 185 | local_irq_restore(flags); | 87 | local_irq_restore(flags); |
| 186 | } | 88 | } |
| 187 | 89 | ||
| @@ -194,20 +96,17 @@ static __inline__ void _raw_read_unlock(rwlock_t *rw) | |||
| 194 | * writers) in interrupt handlers someone fucked up and we'd dead-lock | 96 | * writers) in interrupt handlers someone fucked up and we'd dead-lock |
| 195 | * sooner or later anyway. prumpf */ | 97 | * sooner or later anyway. prumpf */ |
| 196 | 98 | ||
| 197 | #ifdef CONFIG_DEBUG_RWLOCK | 99 | static __inline__ void __raw_write_lock(raw_rwlock_t *rw) |
| 198 | extern void _dbg_write_lock(rwlock_t * rw, const char *bfile, int bline); | ||
| 199 | #define _raw_write_lock(rw) _dbg_write_lock(rw, __FILE__, __LINE__) | ||
| 200 | #else | ||
| 201 | static __inline__ void _raw_write_lock(rwlock_t *rw) | ||
| 202 | { | 100 | { |
| 203 | retry: | 101 | retry: |
| 204 | _raw_spin_lock(&rw->lock); | 102 | __raw_spin_lock(&rw->lock); |
| 205 | 103 | ||
| 206 | if(rw->counter != 0) { | 104 | if(rw->counter != 0) { |
| 207 | /* this basically never happens */ | 105 | /* this basically never happens */ |
| 208 | _raw_spin_unlock(&rw->lock); | 106 | __raw_spin_unlock(&rw->lock); |
| 209 | 107 | ||
| 210 | while(rw->counter != 0); | 108 | while (rw->counter != 0) |
| 109 | cpu_relax(); | ||
| 211 | 110 | ||
| 212 | goto retry; | 111 | goto retry; |
| 213 | } | 112 | } |
| @@ -215,26 +114,21 @@ retry: | |||
| 215 | /* got it. now leave without unlocking */ | 114 | /* got it. now leave without unlocking */ |
| 216 | rw->counter = -1; /* remember we are locked */ | 115 | rw->counter = -1; /* remember we are locked */ |
| 217 | } | 116 | } |
| 218 | #endif /* CONFIG_DEBUG_RWLOCK */ | ||
| 219 | 117 | ||
| 220 | /* write_unlock is absolutely trivial - we don't have to wait for anything */ | 118 | /* write_unlock is absolutely trivial - we don't have to wait for anything */ |
| 221 | 119 | ||
| 222 | static __inline__ void _raw_write_unlock(rwlock_t *rw) | 120 | static __inline__ void __raw_write_unlock(raw_rwlock_t *rw) |
| 223 | { | 121 | { |
| 224 | rw->counter = 0; | 122 | rw->counter = 0; |
| 225 | _raw_spin_unlock(&rw->lock); | 123 | __raw_spin_unlock(&rw->lock); |
| 226 | } | 124 | } |
| 227 | 125 | ||
| 228 | #ifdef CONFIG_DEBUG_RWLOCK | 126 | static __inline__ int __raw_write_trylock(raw_rwlock_t *rw) |
| 229 | extern int _dbg_write_trylock(rwlock_t * rw, const char *bfile, int bline); | ||
| 230 | #define _raw_write_trylock(rw) _dbg_write_trylock(rw, __FILE__, __LINE__) | ||
| 231 | #else | ||
| 232 | static __inline__ int _raw_write_trylock(rwlock_t *rw) | ||
| 233 | { | 127 | { |
| 234 | _raw_spin_lock(&rw->lock); | 128 | __raw_spin_lock(&rw->lock); |
| 235 | if (rw->counter != 0) { | 129 | if (rw->counter != 0) { |
| 236 | /* this basically never happens */ | 130 | /* this basically never happens */ |
| 237 | _raw_spin_unlock(&rw->lock); | 131 | __raw_spin_unlock(&rw->lock); |
| 238 | 132 | ||
| 239 | return 0; | 133 | return 0; |
| 240 | } | 134 | } |
| @@ -243,14 +137,13 @@ static __inline__ int _raw_write_trylock(rwlock_t *rw) | |||
| 243 | rw->counter = -1; /* remember we are locked */ | 137 | rw->counter = -1; /* remember we are locked */ |
| 244 | return 1; | 138 | return 1; |
| 245 | } | 139 | } |
| 246 | #endif /* CONFIG_DEBUG_RWLOCK */ | ||
| 247 | 140 | ||
| 248 | static __inline__ int is_read_locked(rwlock_t *rw) | 141 | static __inline__ int __raw_is_read_locked(raw_rwlock_t *rw) |
| 249 | { | 142 | { |
| 250 | return rw->counter > 0; | 143 | return rw->counter > 0; |
| 251 | } | 144 | } |
| 252 | 145 | ||
| 253 | static __inline__ int is_write_locked(rwlock_t *rw) | 146 | static __inline__ int __raw_is_write_locked(raw_rwlock_t *rw) |
| 254 | { | 147 | { |
| 255 | return rw->counter < 0; | 148 | return rw->counter < 0; |
| 256 | } | 149 | } |
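
The parisc rwlock in the hunks above is simply a counter behind a spinlock: readers take the guard just long enough to bump the counter, while a writer waits for the counter to drain and then, as the "leave without unlocking" comment says, keeps the guard held for its whole critical section. A compilable model of those rules, using a C11 atomic_flag as the guard; the demo_* names are invented, and the kernel version additionally disables interrupts on the reader path:

    /* Counter-plus-guard rwlock in the parisc style (illustration only). */
    #include <stdatomic.h>

    typedef struct {
            atomic_flag guard;      /* stands in for the embedded raw_spinlock_t */
            volatile int counter;   /* >0 readers, 0 free, -1 writer */
    } demo_rwlock_t;

    static void demo_guard_lock(demo_rwlock_t *rw)
    {
            while (atomic_flag_test_and_set_explicit(&rw->guard,
                                                     memory_order_acquire))
                    ;               /* spin; cpu_relax() in the kernel */
    }

    static void demo_guard_unlock(demo_rwlock_t *rw)
    {
            atomic_flag_clear_explicit(&rw->guard, memory_order_release);
    }

    static void demo_read_lock(demo_rwlock_t *rw)
    {
            demo_guard_lock(rw);
            rw->counter++;          /* guard held only for the increment */
            demo_guard_unlock(rw);
    }

    static void demo_read_unlock(demo_rwlock_t *rw)
    {
            demo_guard_lock(rw);
            rw->counter--;
            demo_guard_unlock(rw);
    }

    static void demo_write_lock(demo_rwlock_t *rw)
    {
    retry:
            demo_guard_lock(rw);
            if (rw->counter != 0) { /* readers still inside: drop guard, wait */
                    demo_guard_unlock(rw);
                    while (rw->counter != 0)
                            ;       /* cpu_relax() in the kernel */
                    goto retry;
            }
            rw->counter = -1;       /* got it; guard deliberately stays held */
    }

    static void demo_write_unlock(demo_rwlock_t *rw)
    {
            rw->counter = 0;
            demo_guard_unlock(rw);
    }
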
diff --git a/include/asm-parisc/spinlock_types.h b/include/asm-parisc/spinlock_types.h new file mode 100644 index 000000000000..785bba822fbf --- /dev/null +++ b/include/asm-parisc/spinlock_types.h | |||
| @@ -0,0 +1,21 @@ | |||
| 1 | #ifndef __ASM_SPINLOCK_TYPES_H | ||
| 2 | #define __ASM_SPINLOCK_TYPES_H | ||
| 3 | |||
| 4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
| 5 | # error "please don't include this file directly" | ||
| 6 | #endif | ||
| 7 | |||
| 8 | typedef struct { | ||
| 9 | volatile unsigned int lock[4]; | ||
| 10 | } raw_spinlock_t; | ||
| 11 | |||
| 12 | #define __RAW_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } } | ||
| 13 | |||
| 14 | typedef struct { | ||
| 15 | raw_spinlock_t lock; | ||
| 16 | volatile int counter; | ||
| 17 | } raw_rwlock_t; | ||
| 18 | |||
| 19 | #define __RAW_RW_LOCK_UNLOCKED { __RAW_SPIN_LOCK_UNLOCKED, 0 } | ||
| 20 | |||
| 21 | #endif | ||
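
The lock[4] array above exists for the alignment constraint noted in asm-parisc/spinlock.h: ldcw wants a 16-byte aligned word on at least some PA processors, and 16 bytes of lock words guarantee that one properly aligned word lies inside the struct wherever it lands. A hypothetical sketch of the rounding that __ldcw_align() performs (the real macro lives in asm/system.h and may differ in detail):

    /* Hypothetical model of finding the 16-byte aligned word in lock[4]. */
    #include <stdint.h>

    typedef struct { volatile unsigned int lock[4]; } demo_raw_spinlock_t;

    static volatile unsigned int *demo_ldcw_align(demo_raw_spinlock_t *x)
    {
            /* lock[4] spans 16 bytes, so rounding the base address up to the
             * next 16-byte boundary always stays inside the array. */
            uintptr_t a = (uintptr_t)x->lock;

            return (volatile unsigned int *)((a + 15) & ~(uintptr_t)15);
    }
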
diff --git a/include/asm-parisc/system.h b/include/asm-parisc/system.h index 81c543339036..26ff844a21c1 100644 --- a/include/asm-parisc/system.h +++ b/include/asm-parisc/system.h | |||
| @@ -160,29 +160,7 @@ static inline void set_eiem(unsigned long val) | |||
| 160 | }) | 160 | }) |
| 161 | 161 | ||
| 162 | #ifdef CONFIG_SMP | 162 | #ifdef CONFIG_SMP |
| 163 | /* | 163 | # define __lock_aligned __attribute__((__section__(".data.lock_aligned"))) |
| 164 | * Your basic SMP spinlocks, allowing only a single CPU anywhere | ||
| 165 | */ | ||
| 166 | |||
| 167 | typedef struct { | ||
| 168 | volatile unsigned int lock[4]; | ||
| 169 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 170 | unsigned long magic; | ||
| 171 | volatile unsigned int babble; | ||
| 172 | const char *module; | ||
| 173 | char *bfile; | ||
| 174 | int bline; | ||
| 175 | int oncpu; | ||
| 176 | void *previous; | ||
| 177 | struct task_struct * task; | ||
| 178 | #endif | ||
| 179 | #ifdef CONFIG_PREEMPT | ||
| 180 | unsigned int break_lock; | ||
| 181 | #endif | ||
| 182 | } spinlock_t; | ||
| 183 | |||
| 184 | #define __lock_aligned __attribute__((__section__(".data.lock_aligned"))) | ||
| 185 | |||
| 186 | #endif | 164 | #endif |
| 187 | 165 | ||
| 188 | #define KERNEL_START (0x10100000 - 0x1000) | 166 | #define KERNEL_START (0x10100000 - 0x1000) |
diff --git a/include/asm-ppc/smp.h b/include/asm-ppc/smp.h index 17530c232c76..829481c0a9dc 100644 --- a/include/asm-ppc/smp.h +++ b/include/asm-ppc/smp.h | |||
| @@ -41,6 +41,10 @@ extern void smp_send_xmon_break(int cpu); | |||
| 41 | struct pt_regs; | 41 | struct pt_regs; |
| 42 | extern void smp_message_recv(int, struct pt_regs *); | 42 | extern void smp_message_recv(int, struct pt_regs *); |
| 43 | 43 | ||
| 44 | extern int __cpu_disable(void); | ||
| 45 | extern void __cpu_die(unsigned int cpu); | ||
| 46 | extern void cpu_die(void) __attribute__((noreturn)); | ||
| 47 | |||
| 44 | #define NO_PROC_ID 0xFF /* No processor magic marker */ | 48 | #define NO_PROC_ID 0xFF /* No processor magic marker */ |
| 45 | #define PROC_CHANGE_PENALTY 20 | 49 | #define PROC_CHANGE_PENALTY 20 |
| 46 | 50 | ||
| @@ -64,6 +68,8 @@ extern struct klock_info_struct klock_info; | |||
| 64 | 68 | ||
| 65 | #else /* !(CONFIG_SMP) */ | 69 | #else /* !(CONFIG_SMP) */ |
| 66 | 70 | ||
| 71 | static inline void cpu_die(void) { } | ||
| 72 | |||
| 67 | #endif /* !(CONFIG_SMP) */ | 73 | #endif /* !(CONFIG_SMP) */ |
| 68 | 74 | ||
| 69 | #endif /* !(_PPC_SMP_H) */ | 75 | #endif /* !(_PPC_SMP_H) */ |
diff --git a/include/asm-ppc/spinlock.h b/include/asm-ppc/spinlock.h index 909199aae104..20edcf2a6e0c 100644 --- a/include/asm-ppc/spinlock.h +++ b/include/asm-ppc/spinlock.h | |||
| @@ -5,41 +5,21 @@ | |||
| 5 | 5 | ||
| 6 | /* | 6 | /* |
| 7 | * Simple spin lock operations. | 7 | * Simple spin lock operations. |
| 8 | * | ||
| 9 | * (the type definitions are in asm/spinlock_types.h) | ||
| 8 | */ | 10 | */ |
| 9 | 11 | ||
| 10 | typedef struct { | 12 | #define __raw_spin_is_locked(x) ((x)->lock != 0) |
| 11 | volatile unsigned long lock; | 13 | #define __raw_spin_unlock_wait(lock) \ |
| 12 | #ifdef CONFIG_DEBUG_SPINLOCK | 14 | do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) |
| 13 | volatile unsigned long owner_pc; | 15 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
| 14 | volatile unsigned long owner_cpu; | 16 | |
| 15 | #endif | 17 | static inline void __raw_spin_lock(raw_spinlock_t *lock) |
| 16 | #ifdef CONFIG_PREEMPT | ||
| 17 | unsigned int break_lock; | ||
| 18 | #endif | ||
| 19 | } spinlock_t; | ||
| 20 | |||
| 21 | #ifdef __KERNEL__ | ||
| 22 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 23 | #define SPINLOCK_DEBUG_INIT , 0, 0 | ||
| 24 | #else | ||
| 25 | #define SPINLOCK_DEBUG_INIT /* */ | ||
| 26 | #endif | ||
| 27 | |||
| 28 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 SPINLOCK_DEBUG_INIT } | ||
| 29 | |||
| 30 | #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0) | ||
| 31 | #define spin_is_locked(x) ((x)->lock != 0) | ||
| 32 | #define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x)) | ||
| 33 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | ||
| 34 | |||
| 35 | #ifndef CONFIG_DEBUG_SPINLOCK | ||
| 36 | |||
| 37 | static inline void _raw_spin_lock(spinlock_t *lock) | ||
| 38 | { | 18 | { |
| 39 | unsigned long tmp; | 19 | unsigned long tmp; |
| 40 | 20 | ||
| 41 | __asm__ __volatile__( | 21 | __asm__ __volatile__( |
| 42 | "b 1f # spin_lock\n\ | 22 | "b 1f # __raw_spin_lock\n\ |
| 43 | 2: lwzx %0,0,%1\n\ | 23 | 2: lwzx %0,0,%1\n\ |
| 44 | cmpwi 0,%0,0\n\ | 24 | cmpwi 0,%0,0\n\ |
| 45 | bne+ 2b\n\ | 25 | bne+ 2b\n\ |
| @@ -55,21 +35,13 @@ static inline void _raw_spin_lock(spinlock_t *lock) | |||
| 55 | : "cr0", "memory"); | 35 | : "cr0", "memory"); |
| 56 | } | 36 | } |
| 57 | 37 | ||
| 58 | static inline void _raw_spin_unlock(spinlock_t *lock) | 38 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
| 59 | { | 39 | { |
| 60 | __asm__ __volatile__("eieio # spin_unlock": : :"memory"); | 40 | __asm__ __volatile__("eieio # __raw_spin_unlock": : :"memory"); |
| 61 | lock->lock = 0; | 41 | lock->lock = 0; |
| 62 | } | 42 | } |
| 63 | 43 | ||
| 64 | #define _raw_spin_trylock(l) (!test_and_set_bit(0,&(l)->lock)) | 44 | #define __raw_spin_trylock(l) (!test_and_set_bit(0,&(l)->lock)) |
| 65 | |||
| 66 | #else | ||
| 67 | |||
| 68 | extern void _raw_spin_lock(spinlock_t *lock); | ||
| 69 | extern void _raw_spin_unlock(spinlock_t *lock); | ||
| 70 | extern int _raw_spin_trylock(spinlock_t *lock); | ||
| 71 | |||
| 72 | #endif | ||
| 73 | 45 | ||
| 74 | /* | 46 | /* |
| 75 | * Read-write spinlocks, allowing multiple readers | 47 | * Read-write spinlocks, allowing multiple readers |
| @@ -81,22 +53,11 @@ extern int _raw_spin_trylock(spinlock_t *lock); | |||
| 81 | * irq-safe write-lock, but readers can get non-irqsafe | 53 | * irq-safe write-lock, but readers can get non-irqsafe |
| 82 | * read-locks. | 54 | * read-locks. |
| 83 | */ | 55 | */ |
| 84 | typedef struct { | ||
| 85 | volatile signed int lock; | ||
| 86 | #ifdef CONFIG_PREEMPT | ||
| 87 | unsigned int break_lock; | ||
| 88 | #endif | ||
| 89 | } rwlock_t; | ||
| 90 | 56 | ||
| 91 | #define RW_LOCK_UNLOCKED (rwlock_t) { 0 } | 57 | #define __raw_read_can_lock(rw) ((rw)->lock >= 0) |
| 92 | #define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0) | 58 | #define __raw_write_can_lock(rw) (!(rw)->lock) |
| 93 | 59 | ||
| 94 | #define read_can_lock(rw) ((rw)->lock >= 0) | 60 | static __inline__ int __raw_read_trylock(raw_rwlock_t *rw) |
| 95 | #define write_can_lock(rw) (!(rw)->lock) | ||
| 96 | |||
| 97 | #ifndef CONFIG_DEBUG_SPINLOCK | ||
| 98 | |||
| 99 | static __inline__ int _raw_read_trylock(rwlock_t *rw) | ||
| 100 | { | 61 | { |
| 101 | signed int tmp; | 62 | signed int tmp; |
| 102 | 63 | ||
| @@ -116,7 +77,7 @@ static __inline__ int _raw_read_trylock(rwlock_t *rw) | |||
| 116 | return tmp > 0; | 77 | return tmp > 0; |
| 117 | } | 78 | } |
| 118 | 79 | ||
| 119 | static __inline__ void _raw_read_lock(rwlock_t *rw) | 80 | static __inline__ void __raw_read_lock(raw_rwlock_t *rw) |
| 120 | { | 81 | { |
| 121 | signed int tmp; | 82 | signed int tmp; |
| 122 | 83 | ||
| @@ -137,7 +98,7 @@ static __inline__ void _raw_read_lock(rwlock_t *rw) | |||
| 137 | : "cr0", "memory"); | 98 | : "cr0", "memory"); |
| 138 | } | 99 | } |
| 139 | 100 | ||
| 140 | static __inline__ void _raw_read_unlock(rwlock_t *rw) | 101 | static __inline__ void __raw_read_unlock(raw_rwlock_t *rw) |
| 141 | { | 102 | { |
| 142 | signed int tmp; | 103 | signed int tmp; |
| 143 | 104 | ||
| @@ -153,7 +114,7 @@ static __inline__ void _raw_read_unlock(rwlock_t *rw) | |||
| 153 | : "cr0", "memory"); | 114 | : "cr0", "memory"); |
| 154 | } | 115 | } |
| 155 | 116 | ||
| 156 | static __inline__ int _raw_write_trylock(rwlock_t *rw) | 117 | static __inline__ int __raw_write_trylock(raw_rwlock_t *rw) |
| 157 | { | 118 | { |
| 158 | signed int tmp; | 119 | signed int tmp; |
| 159 | 120 | ||
| @@ -173,7 +134,7 @@ static __inline__ int _raw_write_trylock(rwlock_t *rw) | |||
| 173 | return tmp == 0; | 134 | return tmp == 0; |
| 174 | } | 135 | } |
| 175 | 136 | ||
| 176 | static __inline__ void _raw_write_lock(rwlock_t *rw) | 137 | static __inline__ void __raw_write_lock(raw_rwlock_t *rw) |
| 177 | { | 138 | { |
| 178 | signed int tmp; | 139 | signed int tmp; |
| 179 | 140 | ||
| @@ -194,22 +155,10 @@ static __inline__ void _raw_write_lock(rwlock_t *rw) | |||
| 194 | : "cr0", "memory"); | 155 | : "cr0", "memory"); |
| 195 | } | 156 | } |
| 196 | 157 | ||
| 197 | static __inline__ void _raw_write_unlock(rwlock_t *rw) | 158 | static __inline__ void __raw_write_unlock(raw_rwlock_t *rw) |
| 198 | { | 159 | { |
| 199 | __asm__ __volatile__("eieio # write_unlock": : :"memory"); | 160 | __asm__ __volatile__("eieio # write_unlock": : :"memory"); |
| 200 | rw->lock = 0; | 161 | rw->lock = 0; |
| 201 | } | 162 | } |
| 202 | 163 | ||
| 203 | #else | ||
| 204 | |||
| 205 | extern void _raw_read_lock(rwlock_t *rw); | ||
| 206 | extern void _raw_read_unlock(rwlock_t *rw); | ||
| 207 | extern void _raw_write_lock(rwlock_t *rw); | ||
| 208 | extern void _raw_write_unlock(rwlock_t *rw); | ||
| 209 | extern int _raw_read_trylock(rwlock_t *rw); | ||
| 210 | extern int _raw_write_trylock(rwlock_t *rw); | ||
| 211 | |||
| 212 | #endif | ||
| 213 | |||
| 214 | #endif /* __ASM_SPINLOCK_H */ | 164 | #endif /* __ASM_SPINLOCK_H */ |
| 215 | #endif /* __KERNEL__ */ | ||
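
The rwlock word in this file is a signed count: zero or positive means free or reader-held, and a writer stores a negative value, which is exactly what the __raw_read_can_lock and __raw_write_can_lock tests above encode. A portable sketch of the two trylock paths under that convention (C11 atomics; demo_* names are invented):

    /* Signed-count rwlock trylocks in the ppc style (illustration only). */
    #include <stdatomic.h>

    typedef struct { atomic_int lock; } demo_rwlock_t; /* <0 writer, >=0 readers */

    static int demo_read_trylock(demo_rwlock_t *rw)
    {
            int old = atomic_load_explicit(&rw->lock, memory_order_relaxed);

            while (old >= 0) {      /* no writer: try to bump the reader count */
                    if (atomic_compare_exchange_weak_explicit(&rw->lock, &old,
                                                              old + 1,
                                                              memory_order_acquire,
                                                              memory_order_relaxed))
                            return 1;
            }
            return 0;               /* writer present */
    }

    static int demo_write_trylock(demo_rwlock_t *rw)
    {
            int expected = 0;

            /* A writer stores -1, matching the "lock < 0" checks above. */
            return atomic_compare_exchange_strong_explicit(&rw->lock, &expected,
                                                           -1,
                                                           memory_order_acquire,
                                                           memory_order_relaxed);
    }
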
diff --git a/include/asm-ppc/spinlock_types.h b/include/asm-ppc/spinlock_types.h new file mode 100644 index 000000000000..7919ccc75b8a --- /dev/null +++ b/include/asm-ppc/spinlock_types.h | |||
| @@ -0,0 +1,20 @@ | |||
| 1 | #ifndef __ASM_SPINLOCK_TYPES_H | ||
| 2 | #define __ASM_SPINLOCK_TYPES_H | ||
| 3 | |||
| 4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
| 5 | # error "please don't include this file directly" | ||
| 6 | #endif | ||
| 7 | |||
| 8 | typedef struct { | ||
| 9 | volatile unsigned long lock; | ||
| 10 | } raw_spinlock_t; | ||
| 11 | |||
| 12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | ||
| 13 | |||
| 14 | typedef struct { | ||
| 15 | volatile signed int lock; | ||
| 16 | } raw_rwlock_t; | ||
| 17 | |||
| 18 | #define __RAW_RW_LOCK_UNLOCKED { 0 } | ||
| 19 | |||
| 20 | #endif | ||
diff --git a/include/asm-ppc/system.h b/include/asm-ppc/system.h index 513a334c5810..d754ab570fe0 100644 --- a/include/asm-ppc/system.h +++ b/include/asm-ppc/system.h | |||
| @@ -88,6 +88,7 @@ extern void *cacheable_memcpy(void *, const void *, unsigned int); | |||
| 88 | extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long); | 88 | extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long); |
| 89 | extern void bad_page_fault(struct pt_regs *, unsigned long, int); | 89 | extern void bad_page_fault(struct pt_regs *, unsigned long, int); |
| 90 | extern void die(const char *, struct pt_regs *, long); | 90 | extern void die(const char *, struct pt_regs *, long); |
| 91 | extern void _exception(int, struct pt_regs *, int, unsigned long); | ||
| 91 | #ifdef CONFIG_BOOKE_WDT | 92 | #ifdef CONFIG_BOOKE_WDT |
| 92 | extern u32 booke_wdt_enabled; | 93 | extern u32 booke_wdt_enabled; |
| 93 | extern u32 booke_wdt_period; | 94 | extern u32 booke_wdt_period; |
diff --git a/include/asm-ppc64/spinlock.h b/include/asm-ppc64/spinlock.h index acd11564dd75..14cb895bb607 100644 --- a/include/asm-ppc64/spinlock.h +++ b/include/asm-ppc64/spinlock.h | |||
| @@ -15,36 +15,42 @@ | |||
| 15 | * modify it under the terms of the GNU General Public License | 15 | * modify it under the terms of the GNU General Public License |
| 16 | * as published by the Free Software Foundation; either version | 16 | * as published by the Free Software Foundation; either version |
| 17 | * 2 of the License, or (at your option) any later version. | 17 | * 2 of the License, or (at your option) any later version. |
| 18 | * | ||
| 19 | * (the type definitions are in asm/spinlock_types.h) | ||
| 18 | */ | 20 | */ |
| 19 | #include <linux/config.h> | 21 | #include <linux/config.h> |
| 20 | #include <asm/paca.h> | 22 | #include <asm/paca.h> |
| 21 | #include <asm/hvcall.h> | 23 | #include <asm/hvcall.h> |
| 22 | #include <asm/iSeries/HvCall.h> | 24 | #include <asm/iSeries/HvCall.h> |
| 23 | 25 | ||
| 24 | typedef struct { | 26 | #define __raw_spin_is_locked(x) ((x)->slock != 0) |
| 25 | volatile unsigned int lock; | ||
| 26 | #ifdef CONFIG_PREEMPT | ||
| 27 | unsigned int break_lock; | ||
| 28 | #endif | ||
| 29 | } spinlock_t; | ||
| 30 | 27 | ||
| 31 | typedef struct { | 28 | /* |
| 32 | volatile signed int lock; | 29 | * This returns the old value in the lock, so we succeeded |
| 33 | #ifdef CONFIG_PREEMPT | 30 | * in getting the lock if the return value is 0. |
| 34 | unsigned int break_lock; | 31 | */ |
| 35 | #endif | 32 | static __inline__ unsigned long __spin_trylock(raw_spinlock_t *lock) |
| 36 | } rwlock_t; | 33 | { |
| 34 | unsigned long tmp, tmp2; | ||
| 37 | 35 | ||
| 38 | #ifdef __KERNEL__ | 36 | __asm__ __volatile__( |
| 39 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } | 37 | " lwz %1,%3(13) # __spin_trylock\n\ |
| 38 | 1: lwarx %0,0,%2\n\ | ||
| 39 | cmpwi 0,%0,0\n\ | ||
| 40 | bne- 2f\n\ | ||
| 41 | stwcx. %1,0,%2\n\ | ||
| 42 | bne- 1b\n\ | ||
| 43 | isync\n\ | ||
| 44 | 2:" : "=&r" (tmp), "=&r" (tmp2) | ||
| 45 | : "r" (&lock->slock), "i" (offsetof(struct paca_struct, lock_token)) | ||
| 46 | : "cr0", "memory"); | ||
| 40 | 47 | ||
| 41 | #define spin_is_locked(x) ((x)->lock != 0) | 48 | return tmp; |
| 42 | #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0) | 49 | } |
| 43 | 50 | ||
| 44 | static __inline__ void _raw_spin_unlock(spinlock_t *lock) | 51 | static int __inline__ __raw_spin_trylock(raw_spinlock_t *lock) |
| 45 | { | 52 | { |
| 46 | __asm__ __volatile__("lwsync # spin_unlock": : :"memory"); | 53 | return __spin_trylock(lock) == 0; |
| 47 | lock->lock = 0; | ||
| 48 | } | 54 | } |
| 49 | 55 | ||
| 50 | /* | 56 | /* |
| @@ -64,44 +70,15 @@ static __inline__ void _raw_spin_unlock(spinlock_t *lock) | |||
| 64 | #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES) | 70 | #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES) |
| 65 | /* We only yield to the hypervisor if we are in shared processor mode */ | 71 | /* We only yield to the hypervisor if we are in shared processor mode */ |
| 66 | #define SHARED_PROCESSOR (get_paca()->lppaca.shared_proc) | 72 | #define SHARED_PROCESSOR (get_paca()->lppaca.shared_proc) |
| 67 | extern void __spin_yield(spinlock_t *lock); | 73 | extern void __spin_yield(raw_spinlock_t *lock); |
| 68 | extern void __rw_yield(rwlock_t *lock); | 74 | extern void __rw_yield(raw_rwlock_t *lock); |
| 69 | #else /* SPLPAR || ISERIES */ | 75 | #else /* SPLPAR || ISERIES */ |
| 70 | #define __spin_yield(x) barrier() | 76 | #define __spin_yield(x) barrier() |
| 71 | #define __rw_yield(x) barrier() | 77 | #define __rw_yield(x) barrier() |
| 72 | #define SHARED_PROCESSOR 0 | 78 | #define SHARED_PROCESSOR 0 |
| 73 | #endif | 79 | #endif |
| 74 | extern void spin_unlock_wait(spinlock_t *lock); | ||
| 75 | |||
| 76 | /* | ||
| 77 | * This returns the old value in the lock, so we succeeded | ||
| 78 | * in getting the lock if the return value is 0. | ||
| 79 | */ | ||
| 80 | static __inline__ unsigned long __spin_trylock(spinlock_t *lock) | ||
| 81 | { | ||
| 82 | unsigned long tmp, tmp2; | ||
| 83 | |||
| 84 | __asm__ __volatile__( | ||
| 85 | " lwz %1,%3(13) # __spin_trylock\n\ | ||
| 86 | 1: lwarx %0,0,%2\n\ | ||
| 87 | cmpwi 0,%0,0\n\ | ||
| 88 | bne- 2f\n\ | ||
| 89 | stwcx. %1,0,%2\n\ | ||
| 90 | bne- 1b\n\ | ||
| 91 | isync\n\ | ||
| 92 | 2:" : "=&r" (tmp), "=&r" (tmp2) | ||
| 93 | : "r" (&lock->lock), "i" (offsetof(struct paca_struct, lock_token)) | ||
| 94 | : "cr0", "memory"); | ||
| 95 | |||
| 96 | return tmp; | ||
| 97 | } | ||
| 98 | |||
| 99 | static int __inline__ _raw_spin_trylock(spinlock_t *lock) | ||
| 100 | { | ||
| 101 | return __spin_trylock(lock) == 0; | ||
| 102 | } | ||
| 103 | 80 | ||
| 104 | static void __inline__ _raw_spin_lock(spinlock_t *lock) | 81 | static void __inline__ __raw_spin_lock(raw_spinlock_t *lock) |
| 105 | { | 82 | { |
| 106 | while (1) { | 83 | while (1) { |
| 107 | if (likely(__spin_trylock(lock) == 0)) | 84 | if (likely(__spin_trylock(lock) == 0)) |
| @@ -110,12 +87,12 @@ static void __inline__ _raw_spin_lock(spinlock_t *lock) | |||
| 110 | HMT_low(); | 87 | HMT_low(); |
| 111 | if (SHARED_PROCESSOR) | 88 | if (SHARED_PROCESSOR) |
| 112 | __spin_yield(lock); | 89 | __spin_yield(lock); |
| 113 | } while (unlikely(lock->lock != 0)); | 90 | } while (unlikely(lock->slock != 0)); |
| 114 | HMT_medium(); | 91 | HMT_medium(); |
| 115 | } | 92 | } |
| 116 | } | 93 | } |
| 117 | 94 | ||
| 118 | static void __inline__ _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags) | 95 | static void __inline__ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) |
| 119 | { | 96 | { |
| 120 | unsigned long flags_dis; | 97 | unsigned long flags_dis; |
| 121 | 98 | ||
| @@ -128,12 +105,20 @@ static void __inline__ _raw_spin_lock_flags(spinlock_t *lock, unsigned long flag | |||
| 128 | HMT_low(); | 105 | HMT_low(); |
| 129 | if (SHARED_PROCESSOR) | 106 | if (SHARED_PROCESSOR) |
| 130 | __spin_yield(lock); | 107 | __spin_yield(lock); |
| 131 | } while (unlikely(lock->lock != 0)); | 108 | } while (unlikely(lock->slock != 0)); |
| 132 | HMT_medium(); | 109 | HMT_medium(); |
| 133 | local_irq_restore(flags_dis); | 110 | local_irq_restore(flags_dis); |
| 134 | } | 111 | } |
| 135 | } | 112 | } |
| 136 | 113 | ||
| 114 | static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock) | ||
| 115 | { | ||
| 116 | __asm__ __volatile__("lwsync # __raw_spin_unlock": : :"memory"); | ||
| 117 | lock->slock = 0; | ||
| 118 | } | ||
| 119 | |||
| 120 | extern void __raw_spin_unlock_wait(raw_spinlock_t *lock); | ||
| 121 | |||
| 137 | /* | 122 | /* |
| 138 | * Read-write spinlocks, allowing multiple readers | 123 | * Read-write spinlocks, allowing multiple readers |
| 139 | * but only one writer. | 124 | * but only one writer. |
| @@ -144,24 +129,15 @@ static void __inline__ _raw_spin_lock_flags(spinlock_t *lock, unsigned long flag | |||
| 144 | * irq-safe write-lock, but readers can get non-irqsafe | 129 | * irq-safe write-lock, but readers can get non-irqsafe |
| 145 | * read-locks. | 130 | * read-locks. |
| 146 | */ | 131 | */ |
| 147 | #define RW_LOCK_UNLOCKED (rwlock_t) { 0 } | ||
| 148 | 132 | ||
| 149 | #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0) | 133 | #define __raw_read_can_lock(rw) ((rw)->lock >= 0) |
| 150 | 134 | #define __raw_write_can_lock(rw) (!(rw)->lock) | |
| 151 | #define read_can_lock(rw) ((rw)->lock >= 0) | ||
| 152 | #define write_can_lock(rw) (!(rw)->lock) | ||
| 153 | |||
| 154 | static __inline__ void _raw_write_unlock(rwlock_t *rw) | ||
| 155 | { | ||
| 156 | __asm__ __volatile__("lwsync # write_unlock": : :"memory"); | ||
| 157 | rw->lock = 0; | ||
| 158 | } | ||
| 159 | 135 | ||
| 160 | /* | 136 | /* |
| 161 | * This returns the old value in the lock + 1, | 137 | * This returns the old value in the lock + 1, |
| 162 | * so we got a read lock if the return value is > 0. | 138 | * so we got a read lock if the return value is > 0. |
| 163 | */ | 139 | */ |
| 164 | static long __inline__ __read_trylock(rwlock_t *rw) | 140 | static long __inline__ __read_trylock(raw_rwlock_t *rw) |
| 165 | { | 141 | { |
| 166 | long tmp; | 142 | long tmp; |
| 167 | 143 | ||
| @@ -180,45 +156,11 @@ static long __inline__ __read_trylock(rwlock_t *rw) | |||
| 180 | return tmp; | 156 | return tmp; |
| 181 | } | 157 | } |
| 182 | 158 | ||
| 183 | static int __inline__ _raw_read_trylock(rwlock_t *rw) | ||
| 184 | { | ||
| 185 | return __read_trylock(rw) > 0; | ||
| 186 | } | ||
| 187 | |||
| 188 | static void __inline__ _raw_read_lock(rwlock_t *rw) | ||
| 189 | { | ||
| 190 | while (1) { | ||
| 191 | if (likely(__read_trylock(rw) > 0)) | ||
| 192 | break; | ||
| 193 | do { | ||
| 194 | HMT_low(); | ||
| 195 | if (SHARED_PROCESSOR) | ||
| 196 | __rw_yield(rw); | ||
| 197 | } while (unlikely(rw->lock < 0)); | ||
| 198 | HMT_medium(); | ||
| 199 | } | ||
| 200 | } | ||
| 201 | |||
| 202 | static void __inline__ _raw_read_unlock(rwlock_t *rw) | ||
| 203 | { | ||
| 204 | long tmp; | ||
| 205 | |||
| 206 | __asm__ __volatile__( | ||
| 207 | "eieio # read_unlock\n\ | ||
| 208 | 1: lwarx %0,0,%1\n\ | ||
| 209 | addic %0,%0,-1\n\ | ||
| 210 | stwcx. %0,0,%1\n\ | ||
| 211 | bne- 1b" | ||
| 212 | : "=&r"(tmp) | ||
| 213 | : "r"(&rw->lock) | ||
| 214 | : "cr0", "memory"); | ||
| 215 | } | ||
| 216 | |||
| 217 | /* | 159 | /* |
| 218 | * This returns the old value in the lock, | 160 | * This returns the old value in the lock, |
| 219 | * so we got the write lock if the return value is 0. | 161 | * so we got the write lock if the return value is 0. |
| 220 | */ | 162 | */ |
| 221 | static __inline__ long __write_trylock(rwlock_t *rw) | 163 | static __inline__ long __write_trylock(raw_rwlock_t *rw) |
| 222 | { | 164 | { |
| 223 | long tmp, tmp2; | 165 | long tmp, tmp2; |
| 224 | 166 | ||
| @@ -237,12 +179,21 @@ static __inline__ long __write_trylock(rwlock_t *rw) | |||
| 237 | return tmp; | 179 | return tmp; |
| 238 | } | 180 | } |
| 239 | 181 | ||
| 240 | static int __inline__ _raw_write_trylock(rwlock_t *rw) | 182 | static void __inline__ __raw_read_lock(raw_rwlock_t *rw) |
| 241 | { | 183 | { |
| 242 | return __write_trylock(rw) == 0; | 184 | while (1) { |
| 185 | if (likely(__read_trylock(rw) > 0)) | ||
| 186 | break; | ||
| 187 | do { | ||
| 188 | HMT_low(); | ||
| 189 | if (SHARED_PROCESSOR) | ||
| 190 | __rw_yield(rw); | ||
| 191 | } while (unlikely(rw->lock < 0)); | ||
| 192 | HMT_medium(); | ||
| 193 | } | ||
| 243 | } | 194 | } |
| 244 | 195 | ||
| 245 | static void __inline__ _raw_write_lock(rwlock_t *rw) | 196 | static void __inline__ __raw_write_lock(raw_rwlock_t *rw) |
| 246 | { | 197 | { |
| 247 | while (1) { | 198 | while (1) { |
| 248 | if (likely(__write_trylock(rw) == 0)) | 199 | if (likely(__write_trylock(rw) == 0)) |
| @@ -256,5 +207,35 @@ static void __inline__ _raw_write_lock(rwlock_t *rw) | |||
| 256 | } | 207 | } |
| 257 | } | 208 | } |
| 258 | 209 | ||
| 259 | #endif /* __KERNEL__ */ | 210 | static int __inline__ __raw_read_trylock(raw_rwlock_t *rw) |
| 211 | { | ||
| 212 | return __read_trylock(rw) > 0; | ||
| 213 | } | ||
| 214 | |||
| 215 | static int __inline__ __raw_write_trylock(raw_rwlock_t *rw) | ||
| 216 | { | ||
| 217 | return __write_trylock(rw) == 0; | ||
| 218 | } | ||
| 219 | |||
| 220 | static void __inline__ __raw_read_unlock(raw_rwlock_t *rw) | ||
| 221 | { | ||
| 222 | long tmp; | ||
| 223 | |||
| 224 | __asm__ __volatile__( | ||
| 225 | "eieio # read_unlock\n\ | ||
| 226 | 1: lwarx %0,0,%1\n\ | ||
| 227 | addic %0,%0,-1\n\ | ||
| 228 | stwcx. %0,0,%1\n\ | ||
| 229 | bne- 1b" | ||
| 230 | : "=&r"(tmp) | ||
| 231 | : "r"(&rw->lock) | ||
| 232 | : "cr0", "memory"); | ||
| 233 | } | ||
| 234 | |||
| 235 | static __inline__ void __raw_write_unlock(raw_rwlock_t *rw) | ||
| 236 | { | ||
| 237 | __asm__ __volatile__("lwsync # write_unlock": : :"memory"); | ||
| 238 | rw->lock = 0; | ||
| 239 | } | ||
| 240 | |||
| 260 | #endif /* __ASM_SPINLOCK_H */ | 241 | #endif /* __ASM_SPINLOCK_H */ |
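
What distinguishes the ppc64 loop above from the other ports is the contention path: the spinner drops its SMT priority with HMT_low() and, on shared-processor partitions, donates its cycles to the lock holder through __spin_yield(). A loose userspace analogue of that shape, with sched_yield() standing in for the hypervisor yield and a plain flag standing in for SHARED_PROCESSOR (all demo_* names invented):

    /* Spin-then-yield lock loop in the ppc64 style (illustration only). */
    #include <stdatomic.h>
    #include <sched.h>

    typedef struct { atomic_uint slock; } demo_spinlock_t;

    static int demo_shared_processor = 1;  /* pretend we run on a shared LPAR */

    static int demo_spin_trylock(demo_spinlock_t *l)
    {
            unsigned int expected = 0;

            return atomic_compare_exchange_strong_explicit(&l->slock, &expected,
                                                           1,
                                                           memory_order_acquire,
                                                           memory_order_relaxed);
    }

    static void demo_spin_lock(demo_spinlock_t *l)
    {
            while (1) {
                    if (demo_spin_trylock(l))       /* fast path */
                            break;
                    do {
                            /* HMT_low() here in the kernel */
                            if (demo_shared_processor)
                                    sched_yield();  /* __spin_yield(lock) */
                    } while (atomic_load_explicit(&l->slock,
                                                  memory_order_relaxed) != 0);
                    /* HMT_medium() here in the kernel; retry the trylock */
            }
    }
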
diff --git a/include/asm-ppc64/spinlock_types.h b/include/asm-ppc64/spinlock_types.h new file mode 100644 index 000000000000..a37c8eabb9f2 --- /dev/null +++ b/include/asm-ppc64/spinlock_types.h | |||
| @@ -0,0 +1,20 @@ | |||
| 1 | #ifndef __ASM_SPINLOCK_TYPES_H | ||
| 2 | #define __ASM_SPINLOCK_TYPES_H | ||
| 3 | |||
| 4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
| 5 | # error "please don't include this file directly" | ||
| 6 | #endif | ||
| 7 | |||
| 8 | typedef struct { | ||
| 9 | volatile unsigned int slock; | ||
| 10 | } raw_spinlock_t; | ||
| 11 | |||
| 12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | ||
| 13 | |||
| 14 | typedef struct { | ||
| 15 | volatile signed int lock; | ||
| 16 | } raw_rwlock_t; | ||
| 17 | |||
| 18 | #define __RAW_RW_LOCK_UNLOCKED { 0 } | ||
| 19 | |||
| 20 | #endif | ||
diff --git a/include/asm-s390/spinlock.h b/include/asm-s390/spinlock.h index 321b23bba1ec..273dbecf8ace 100644 --- a/include/asm-s390/spinlock.h +++ b/include/asm-s390/spinlock.h | |||
| @@ -27,25 +27,19 @@ _raw_compare_and_swap(volatile unsigned int *lock, | |||
| 27 | * on the local processor, one does not. | 27 | * on the local processor, one does not. |
| 28 | * | 28 | * |
| 29 | * We make no fairness assumptions. They have a cost. | 29 | * We make no fairness assumptions. They have a cost. |
| 30 | * | ||
| 31 | * (the type definitions are in asm/spinlock_types.h) | ||
| 30 | */ | 32 | */ |
| 31 | 33 | ||
| 32 | typedef struct { | 34 | #define __raw_spin_is_locked(x) ((x)->lock != 0) |
| 33 | volatile unsigned int lock; | 35 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
| 34 | #ifdef CONFIG_PREEMPT | 36 | #define __raw_spin_unlock_wait(lock) \ |
| 35 | unsigned int break_lock; | 37 | do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) |
| 36 | #endif | ||
| 37 | } __attribute__ ((aligned (4))) spinlock_t; | ||
| 38 | |||
| 39 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } | ||
| 40 | #define spin_lock_init(lp) do { (lp)->lock = 0; } while(0) | ||
| 41 | #define spin_unlock_wait(lp) do { barrier(); } while(((volatile spinlock_t *)(lp))->lock) | ||
| 42 | #define spin_is_locked(x) ((x)->lock != 0) | ||
| 43 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | ||
| 44 | 38 | ||
| 45 | extern void _raw_spin_lock_wait(spinlock_t *lp, unsigned int pc); | 39 | extern void _raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc); |
| 46 | extern int _raw_spin_trylock_retry(spinlock_t *lp, unsigned int pc); | 40 | extern int _raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc); |
| 47 | 41 | ||
| 48 | static inline void _raw_spin_lock(spinlock_t *lp) | 42 | static inline void __raw_spin_lock(raw_spinlock_t *lp) |
| 49 | { | 43 | { |
| 50 | unsigned long pc = 1 | (unsigned long) __builtin_return_address(0); | 44 | unsigned long pc = 1 | (unsigned long) __builtin_return_address(0); |
| 51 | 45 | ||
| @@ -53,7 +47,7 @@ static inline void _raw_spin_lock(spinlock_t *lp) | |||
| 53 | _raw_spin_lock_wait(lp, pc); | 47 | _raw_spin_lock_wait(lp, pc); |
| 54 | } | 48 | } |
| 55 | 49 | ||
| 56 | static inline int _raw_spin_trylock(spinlock_t *lp) | 50 | static inline int __raw_spin_trylock(raw_spinlock_t *lp) |
| 57 | { | 51 | { |
| 58 | unsigned long pc = 1 | (unsigned long) __builtin_return_address(0); | 52 | unsigned long pc = 1 | (unsigned long) __builtin_return_address(0); |
| 59 | 53 | ||
| @@ -62,7 +56,7 @@ static inline int _raw_spin_trylock(spinlock_t *lp) | |||
| 62 | return _raw_spin_trylock_retry(lp, pc); | 56 | return _raw_spin_trylock_retry(lp, pc); |
| 63 | } | 57 | } |
| 64 | 58 | ||
| 65 | static inline void _raw_spin_unlock(spinlock_t *lp) | 59 | static inline void __raw_spin_unlock(raw_spinlock_t *lp) |
| 66 | { | 60 | { |
| 67 | _raw_compare_and_swap(&lp->lock, lp->lock, 0); | 61 | _raw_compare_and_swap(&lp->lock, lp->lock, 0); |
| 68 | } | 62 | } |
| @@ -77,36 +71,25 @@ static inline void _raw_spin_unlock(spinlock_t *lp) | |||
| 77 | * irq-safe write-lock, but readers can get non-irqsafe | 71 | * irq-safe write-lock, but readers can get non-irqsafe |
| 78 | * read-locks. | 72 | * read-locks. |
| 79 | */ | 73 | */ |
| 80 | typedef struct { | ||
| 81 | volatile unsigned int lock; | ||
| 82 | volatile unsigned long owner_pc; | ||
| 83 | #ifdef CONFIG_PREEMPT | ||
| 84 | unsigned int break_lock; | ||
| 85 | #endif | ||
| 86 | } rwlock_t; | ||
| 87 | |||
| 88 | #define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 } | ||
| 89 | |||
| 90 | #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0) | ||
| 91 | 74 | ||
| 92 | /** | 75 | /** |
| 93 | * read_can_lock - would read_trylock() succeed? | 76 | * read_can_lock - would read_trylock() succeed? |
| 94 | * @lock: the rwlock in question. | 77 | * @lock: the rwlock in question. |
| 95 | */ | 78 | */ |
| 96 | #define read_can_lock(x) ((int)(x)->lock >= 0) | 79 | #define __raw_read_can_lock(x) ((int)(x)->lock >= 0) |
| 97 | 80 | ||
| 98 | /** | 81 | /** |
| 99 | * write_can_lock - would write_trylock() succeed? | 82 | * write_can_lock - would write_trylock() succeed? |
| 100 | * @lock: the rwlock in question. | 83 | * @lock: the rwlock in question. |
| 101 | */ | 84 | */ |
| 102 | #define write_can_lock(x) ((x)->lock == 0) | 85 | #define __raw_write_can_lock(x) ((x)->lock == 0) |
| 103 | 86 | ||
| 104 | extern void _raw_read_lock_wait(rwlock_t *lp); | 87 | extern void _raw_read_lock_wait(raw_rwlock_t *lp); |
| 105 | extern int _raw_read_trylock_retry(rwlock_t *lp); | 88 | extern int _raw_read_trylock_retry(raw_rwlock_t *lp); |
| 106 | extern void _raw_write_lock_wait(rwlock_t *lp); | 89 | extern void _raw_write_lock_wait(raw_rwlock_t *lp); |
| 107 | extern int _raw_write_trylock_retry(rwlock_t *lp); | 90 | extern int _raw_write_trylock_retry(raw_rwlock_t *lp); |
| 108 | 91 | ||
| 109 | static inline void _raw_read_lock(rwlock_t *rw) | 92 | static inline void __raw_read_lock(raw_rwlock_t *rw) |
| 110 | { | 93 | { |
| 111 | unsigned int old; | 94 | unsigned int old; |
| 112 | old = rw->lock & 0x7fffffffU; | 95 | old = rw->lock & 0x7fffffffU; |
| @@ -114,7 +97,7 @@ static inline void _raw_read_lock(rwlock_t *rw) | |||
| 114 | _raw_read_lock_wait(rw); | 97 | _raw_read_lock_wait(rw); |
| 115 | } | 98 | } |
| 116 | 99 | ||
| 117 | static inline void _raw_read_unlock(rwlock_t *rw) | 100 | static inline void __raw_read_unlock(raw_rwlock_t *rw) |
| 118 | { | 101 | { |
| 119 | unsigned int old, cmp; | 102 | unsigned int old, cmp; |
| 120 | 103 | ||
| @@ -125,18 +108,18 @@ static inline void _raw_read_unlock(rwlock_t *rw) | |||
| 125 | } while (cmp != old); | 108 | } while (cmp != old); |
| 126 | } | 109 | } |
| 127 | 110 | ||
| 128 | static inline void _raw_write_lock(rwlock_t *rw) | 111 | static inline void __raw_write_lock(raw_rwlock_t *rw) |
| 129 | { | 112 | { |
| 130 | if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) | 113 | if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) |
| 131 | _raw_write_lock_wait(rw); | 114 | _raw_write_lock_wait(rw); |
| 132 | } | 115 | } |
| 133 | 116 | ||
| 134 | static inline void _raw_write_unlock(rwlock_t *rw) | 117 | static inline void __raw_write_unlock(raw_rwlock_t *rw) |
| 135 | { | 118 | { |
| 136 | _raw_compare_and_swap(&rw->lock, 0x80000000, 0); | 119 | _raw_compare_and_swap(&rw->lock, 0x80000000, 0); |
| 137 | } | 120 | } |
| 138 | 121 | ||
| 139 | static inline int _raw_read_trylock(rwlock_t *rw) | 122 | static inline int __raw_read_trylock(raw_rwlock_t *rw) |
| 140 | { | 123 | { |
| 141 | unsigned int old; | 124 | unsigned int old; |
| 142 | old = rw->lock & 0x7fffffffU; | 125 | old = rw->lock & 0x7fffffffU; |
| @@ -145,7 +128,7 @@ static inline int _raw_read_trylock(rwlock_t *rw) | |||
| 145 | return _raw_read_trylock_retry(rw); | 128 | return _raw_read_trylock_retry(rw); |
| 146 | } | 129 | } |
| 147 | 130 | ||
| 148 | static inline int _raw_write_trylock(rwlock_t *rw) | 131 | static inline int __raw_write_trylock(raw_rwlock_t *rw) |
| 149 | { | 132 | { |
| 150 | if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)) | 133 | if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)) |
| 151 | return 1; | 134 | return 1; |
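The s390 rwlock above packs its whole state into a single word: the sign
bit is the writer flag and the low 31 bits count readers, which is what the
0x7fffffffU mask and the 0x80000000 writer value express. A minimal model of
the two trylock transitions, assuming a cas() that atomically replaces the
word and returns the prior value -- illustrative only, not the kernel
primitive:

	#define WRITER	0x80000000U

	static int model_read_trylock(volatile unsigned int *lock)
	{
		unsigned int old = *lock & 0x7fffffffU;	/* assume no writer */
		return cas(lock, old, old + 1) == old;	/* one more reader */
	}

	static int model_write_trylock(volatile unsigned int *lock)
	{
		return cas(lock, 0, WRITER) == 0;	/* only from fully idle */
	}

This also explains the can_lock macros: (int)lock >= 0 means the writer bit
is clear, and lock == 0 means no readers and no writer.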
diff --git a/include/asm-s390/spinlock_types.h b/include/asm-s390/spinlock_types.h new file mode 100644 index 000000000000..f79a2216204f --- /dev/null +++ b/include/asm-s390/spinlock_types.h | |||
| @@ -0,0 +1,21 @@ | |||
| 1 | #ifndef __ASM_SPINLOCK_TYPES_H | ||
| 2 | #define __ASM_SPINLOCK_TYPES_H | ||
| 3 | |||
| 4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
| 5 | # error "please don't include this file directly" | ||
| 6 | #endif | ||
| 7 | |||
| 8 | typedef struct { | ||
| 9 | volatile unsigned int lock; | ||
| 10 | } __attribute__ ((aligned (4))) raw_spinlock_t; | ||
| 11 | |||
| 12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | ||
| 13 | |||
| 14 | typedef struct { | ||
| 15 | volatile unsigned int lock; | ||
| 16 | volatile unsigned int owner_pc; | ||
| 17 | } raw_rwlock_t; | ||
| 18 | |||
| 19 | #define __RAW_RW_LOCK_UNLOCKED { 0, 0 } | ||
| 20 | |||
| 21 | #endif | ||
diff --git a/include/asm-sh/spinlock.h b/include/asm-sh/spinlock.h index e770b55649eb..846322d4c35d 100644 --- a/include/asm-sh/spinlock.h +++ b/include/asm-sh/spinlock.h | |||
| @@ -15,20 +15,11 @@ | |||
| 15 | /* | 15 | /* |
| 16 | * Your basic SMP spinlocks, allowing only a single CPU anywhere | 16 | * Your basic SMP spinlocks, allowing only a single CPU anywhere |
| 17 | */ | 17 | */ |
| 18 | typedef struct { | ||
| 19 | volatile unsigned long lock; | ||
| 20 | #ifdef CONFIG_PREEMPT | ||
| 21 | unsigned int break_lock; | ||
| 22 | #endif | ||
| 23 | } spinlock_t; | ||
| 24 | 18 | ||
| 25 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } | 19 | #define __raw_spin_is_locked(x) ((x)->lock != 0) |
| 26 | 20 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) | |
| 27 | #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0) | 21 | #define __raw_spin_unlock_wait(x) \ |
| 28 | 22 | do { cpu_relax(); } while (__raw_spin_is_locked(x)) | |
| 29 | #define spin_is_locked(x) ((x)->lock != 0) | ||
| 30 | #define spin_unlock_wait(x) do { barrier(); } while (spin_is_locked(x)) | ||
| 31 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | ||
| 32 | 23 | ||
| 33 | /* | 24 | /* |
| 34 | * Simple spin lock operations. There are two variants, one clears IRQ's | 25 | * Simple spin lock operations. There are two variants, one clears IRQ's |
| @@ -36,7 +27,7 @@ typedef struct { | |||
| 36 | * | 27 | * |
| 37 | * We make no fairness assumptions. They have a cost. | 28 | * We make no fairness assumptions. They have a cost. |
| 38 | */ | 29 | */ |
| 39 | static inline void _raw_spin_lock(spinlock_t *lock) | 30 | static inline void __raw_spin_lock(raw_spinlock_t *lock) |
| 40 | { | 31 | { |
| 41 | __asm__ __volatile__ ( | 32 | __asm__ __volatile__ ( |
| 42 | "1:\n\t" | 33 | "1:\n\t" |
| @@ -49,14 +40,14 @@ static inline void _raw_spin_lock(spinlock_t *lock) | |||
| 49 | ); | 40 | ); |
| 50 | } | 41 | } |
| 51 | 42 | ||
| 52 | static inline void _raw_spin_unlock(spinlock_t *lock) | 43 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
| 53 | { | 44 | { |
| 54 | assert_spin_locked(lock); | 45 | assert_spin_locked(lock); |
| 55 | 46 | ||
| 56 | lock->lock = 0; | 47 | lock->lock = 0; |
| 57 | } | 48 | } |
| 58 | 49 | ||
| 59 | #define _raw_spin_trylock(x) (!test_and_set_bit(0, &(x)->lock)) | 50 | #define __raw_spin_trylock(x) (!test_and_set_bit(0, &(x)->lock)) |
| 60 | 51 | ||
| 61 | /* | 52 | /* |
| 62 | * Read-write spinlocks, allowing multiple readers but only one writer. | 53 | * Read-write spinlocks, allowing multiple readers but only one writer. |
| @@ -66,51 +57,40 @@ static inline void _raw_spin_unlock(spinlock_t *lock) | |||
| 66 | * needs to get a irq-safe write-lock, but readers can get non-irqsafe | 57 | * needs to get a irq-safe write-lock, but readers can get non-irqsafe |
| 67 | * read-locks. | 58 | * read-locks. |
| 68 | */ | 59 | */ |
| 69 | typedef struct { | 60 | |
| 70 | spinlock_t lock; | 61 | static inline void __raw_read_lock(raw_rwlock_t *rw) |
| 71 | atomic_t counter; | ||
| 72 | #ifdef CONFIG_PREEMPT | ||
| 73 | unsigned int break_lock; | ||
| 74 | #endif | ||
| 75 | } rwlock_t; | ||
| 76 | |||
| 77 | #define RW_LOCK_BIAS 0x01000000 | ||
| 78 | #define RW_LOCK_UNLOCKED (rwlock_t) { { 0 }, { RW_LOCK_BIAS } } | ||
| 79 | #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while (0) | ||
| 80 | |||
| 81 | static inline void _raw_read_lock(rwlock_t *rw) | ||
| 82 | { | 62 | { |
| 83 | _raw_spin_lock(&rw->lock); | 63 | __raw_spin_lock(&rw->lock); |
| 84 | 64 | ||
| 85 | atomic_inc(&rw->counter); | 65 | atomic_inc(&rw->counter); |
| 86 | 66 | ||
| 87 | _raw_spin_unlock(&rw->lock); | 67 | __raw_spin_unlock(&rw->lock); |
| 88 | } | 68 | } |
| 89 | 69 | ||
| 90 | static inline void _raw_read_unlock(rwlock_t *rw) | 70 | static inline void __raw_read_unlock(raw_rwlock_t *rw) |
| 91 | { | 71 | { |
| 92 | _raw_spin_lock(&rw->lock); | 72 | __raw_spin_lock(&rw->lock); |
| 93 | 73 | ||
| 94 | atomic_dec(&rw->counter); | 74 | atomic_dec(&rw->counter); |
| 95 | 75 | ||
| 96 | _raw_spin_unlock(&rw->lock); | 76 | __raw_spin_unlock(&rw->lock); |
| 97 | } | 77 | } |
| 98 | 78 | ||
| 99 | static inline void _raw_write_lock(rwlock_t *rw) | 79 | static inline void __raw_write_lock(raw_rwlock_t *rw) |
| 100 | { | 80 | { |
| 101 | _raw_spin_lock(&rw->lock); | 81 | __raw_spin_lock(&rw->lock); |
| 102 | atomic_set(&rw->counter, -1); | 82 | atomic_set(&rw->counter, -1); |
| 103 | } | 83 | } |
| 104 | 84 | ||
| 105 | static inline void _raw_write_unlock(rwlock_t *rw) | 85 | static inline void __raw_write_unlock(raw_rwlock_t *rw) |
| 106 | { | 86 | { |
| 107 | atomic_set(&rw->counter, 0); | 87 | atomic_set(&rw->counter, 0); |
| 108 | _raw_spin_unlock(&rw->lock); | 88 | __raw_spin_unlock(&rw->lock); |
| 109 | } | 89 | } |
| 110 | 90 | ||
| 111 | #define _raw_read_trylock(lock) generic_raw_read_trylock(lock) | 91 | #define __raw_read_trylock(lock) generic__raw_read_trylock(lock) |
| 112 | 92 | ||
| 113 | static inline int _raw_write_trylock(rwlock_t *rw) | 93 | static inline int __raw_write_trylock(raw_rwlock_t *rw) |
| 114 | { | 94 | { |
| 115 | if (atomic_sub_and_test(RW_LOCK_BIAS, &rw->counter)) | 95 | if (atomic_sub_and_test(RW_LOCK_BIAS, &rw->counter)) |
| 116 | return 1; | 96 | return 1; |
| @@ -121,4 +101,3 @@ static inline int _raw_write_trylock(rwlock_t *rw) | |||
| 121 | } | 101 | } |
| 122 | 102 | ||
| 123 | #endif /* __ASM_SH_SPINLOCK_H */ | 103 | #endif /* __ASM_SH_SPINLOCK_H */ |
| 124 | |||
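The sh variant builds its rwlock from an inner raw spinlock plus an atomic
reader count, so the reader paths are just guarded counter updates. A sketch
of the read-lock side with hypothetical names (guard and nreaders stand in
for rw->lock and rw->counter):

	static void sketch_read_lock(raw_spinlock_t *guard, atomic_t *nreaders)
	{
		__raw_spin_lock(guard);		/* brief mutual exclusion */
		atomic_inc(nreaders);		/* register this reader */
		__raw_spin_unlock(guard);	/* other readers may enter */
	}

A writer instead keeps the inner lock held across the whole critical section
and flags the counter, which is why __raw_write_unlock resets the counter
before dropping the guard lock.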
diff --git a/include/asm-sh/spinlock_types.h b/include/asm-sh/spinlock_types.h new file mode 100644 index 000000000000..8c41b6c3aac8 --- /dev/null +++ b/include/asm-sh/spinlock_types.h | |||
| @@ -0,0 +1,22 @@ | |||
| 1 | #ifndef __ASM_SH_SPINLOCK_TYPES_H | ||
| 2 | #define __ASM_SH_SPINLOCK_TYPES_H | ||
| 3 | |||
| 4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
| 5 | # error "please don't include this file directly" | ||
| 6 | #endif | ||
| 7 | |||
| 8 | typedef struct { | ||
| 9 | volatile unsigned long lock; | ||
| 10 | } raw_spinlock_t; | ||
| 11 | |||
| 12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | ||
| 13 | |||
| 14 | typedef struct { | ||
| 15 | raw_spinlock_t lock; | ||
| 16 | atomic_t counter; | ||
| 17 | } raw_rwlock_t; | ||
| 18 | |||
| 19 | #define RW_LOCK_BIAS 0x01000000 | ||
| 20 | #define __RAW_RW_LOCK_UNLOCKED { { 0 }, { RW_LOCK_BIAS } } | ||
| 21 | |||
| 22 | #endif | ||
diff --git a/include/asm-sparc/spinlock.h b/include/asm-sparc/spinlock.h index 0cbd87ad4912..111727a2bb4e 100644 --- a/include/asm-sparc/spinlock.h +++ b/include/asm-sparc/spinlock.h | |||
| @@ -12,96 +12,12 @@ | |||
| 12 | 12 | ||
| 13 | #include <asm/psr.h> | 13 | #include <asm/psr.h> |
| 14 | 14 | ||
| 15 | #ifdef CONFIG_DEBUG_SPINLOCK | 15 | #define __raw_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0) |
| 16 | struct _spinlock_debug { | ||
| 17 | unsigned char lock; | ||
| 18 | unsigned long owner_pc; | ||
| 19 | #ifdef CONFIG_PREEMPT | ||
| 20 | unsigned int break_lock; | ||
| 21 | #endif | ||
| 22 | }; | ||
| 23 | typedef struct _spinlock_debug spinlock_t; | ||
| 24 | |||
| 25 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0, 0 } | ||
| 26 | #define spin_lock_init(lp) do { *(lp)= SPIN_LOCK_UNLOCKED; } while(0) | ||
| 27 | #define spin_is_locked(lp) (*((volatile unsigned char *)(&((lp)->lock))) != 0) | ||
| 28 | #define spin_unlock_wait(lp) do { barrier(); } while(*(volatile unsigned char *)(&(lp)->lock)) | ||
| 29 | |||
| 30 | extern void _do_spin_lock(spinlock_t *lock, char *str); | ||
| 31 | extern int _spin_trylock(spinlock_t *lock); | ||
| 32 | extern void _do_spin_unlock(spinlock_t *lock); | ||
| 33 | |||
| 34 | #define _raw_spin_trylock(lp) _spin_trylock(lp) | ||
| 35 | #define _raw_spin_lock(lock) _do_spin_lock(lock, "spin_lock") | ||
| 36 | #define _raw_spin_unlock(lock) _do_spin_unlock(lock) | ||
| 37 | |||
| 38 | struct _rwlock_debug { | ||
| 39 | volatile unsigned int lock; | ||
| 40 | unsigned long owner_pc; | ||
| 41 | unsigned long reader_pc[NR_CPUS]; | ||
| 42 | #ifdef CONFIG_PREEMPT | ||
| 43 | unsigned int break_lock; | ||
| 44 | #endif | ||
| 45 | }; | ||
| 46 | typedef struct _rwlock_debug rwlock_t; | ||
| 47 | |||
| 48 | #define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0, {0} } | ||
| 49 | |||
| 50 | #define rwlock_init(lp) do { *(lp)= RW_LOCK_UNLOCKED; } while(0) | ||
| 51 | |||
| 52 | extern void _do_read_lock(rwlock_t *rw, char *str); | ||
| 53 | extern void _do_read_unlock(rwlock_t *rw, char *str); | ||
| 54 | extern void _do_write_lock(rwlock_t *rw, char *str); | ||
| 55 | extern void _do_write_unlock(rwlock_t *rw); | ||
| 56 | |||
| 57 | #define _raw_read_lock(lock) \ | ||
| 58 | do { unsigned long flags; \ | ||
| 59 | local_irq_save(flags); \ | ||
| 60 | _do_read_lock(lock, "read_lock"); \ | ||
| 61 | local_irq_restore(flags); \ | ||
| 62 | } while(0) | ||
| 63 | |||
| 64 | #define _raw_read_unlock(lock) \ | ||
| 65 | do { unsigned long flags; \ | ||
| 66 | local_irq_save(flags); \ | ||
| 67 | _do_read_unlock(lock, "read_unlock"); \ | ||
| 68 | local_irq_restore(flags); \ | ||
| 69 | } while(0) | ||
| 70 | |||
| 71 | #define _raw_write_lock(lock) \ | ||
| 72 | do { unsigned long flags; \ | ||
| 73 | local_irq_save(flags); \ | ||
| 74 | _do_write_lock(lock, "write_lock"); \ | ||
| 75 | local_irq_restore(flags); \ | ||
| 76 | } while(0) | ||
| 77 | |||
| 78 | #define _raw_write_unlock(lock) \ | ||
| 79 | do { unsigned long flags; \ | ||
| 80 | local_irq_save(flags); \ | ||
| 81 | _do_write_unlock(lock); \ | ||
| 82 | local_irq_restore(flags); \ | ||
| 83 | } while(0) | ||
| 84 | |||
| 85 | #else /* !CONFIG_DEBUG_SPINLOCK */ | ||
| 86 | |||
| 87 | typedef struct { | ||
| 88 | unsigned char lock; | ||
| 89 | #ifdef CONFIG_PREEMPT | ||
| 90 | unsigned int break_lock; | ||
| 91 | #endif | ||
| 92 | } spinlock_t; | ||
| 93 | |||
| 94 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } | ||
| 95 | |||
| 96 | #define spin_lock_init(lock) (*((unsigned char *)(lock)) = 0) | ||
| 97 | #define spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0) | ||
| 98 | 16 | ||
| 99 | #define spin_unlock_wait(lock) \ | 17 | #define __raw_spin_unlock_wait(lock) \ |
| 100 | do { \ | 18 | do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) |
| 101 | barrier(); \ | ||
| 102 | } while(*((volatile unsigned char *)lock)) | ||
| 103 | 19 | ||
| 104 | extern __inline__ void _raw_spin_lock(spinlock_t *lock) | 20 | extern __inline__ void __raw_spin_lock(raw_spinlock_t *lock) |
| 105 | { | 21 | { |
| 106 | __asm__ __volatile__( | 22 | __asm__ __volatile__( |
| 107 | "\n1:\n\t" | 23 | "\n1:\n\t" |
| @@ -121,7 +37,7 @@ extern __inline__ void _raw_spin_lock(spinlock_t *lock) | |||
| 121 | : "g2", "memory", "cc"); | 37 | : "g2", "memory", "cc"); |
| 122 | } | 38 | } |
| 123 | 39 | ||
| 124 | extern __inline__ int _raw_spin_trylock(spinlock_t *lock) | 40 | extern __inline__ int __raw_spin_trylock(raw_spinlock_t *lock) |
| 125 | { | 41 | { |
| 126 | unsigned int result; | 42 | unsigned int result; |
| 127 | __asm__ __volatile__("ldstub [%1], %0" | 43 | __asm__ __volatile__("ldstub [%1], %0" |
| @@ -131,7 +47,7 @@ extern __inline__ int _raw_spin_trylock(spinlock_t *lock) | |||
| 131 | return (result == 0); | 47 | return (result == 0); |
| 132 | } | 48 | } |
| 133 | 49 | ||
| 134 | extern __inline__ void _raw_spin_unlock(spinlock_t *lock) | 50 | extern __inline__ void __raw_spin_unlock(raw_spinlock_t *lock) |
| 135 | { | 51 | { |
| 136 | __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory"); | 52 | __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory"); |
| 137 | } | 53 | } |
| @@ -147,23 +63,11 @@ extern __inline__ void _raw_spin_unlock(spinlock_t *lock) | |||
| 147 | * | 63 | * |
| 148 | * XXX This might create some problems with my dual spinlock | 64 | * XXX This might create some problems with my dual spinlock |
| 149 | * XXX scheme, deadlocks etc. -DaveM | 65 | * XXX scheme, deadlocks etc. -DaveM |
| 150 | */ | 66 | * |
| 151 | typedef struct { | 67 | * Sort of like atomic_t's on Sparc, but even more clever. |
| 152 | volatile unsigned int lock; | ||
| 153 | #ifdef CONFIG_PREEMPT | ||
| 154 | unsigned int break_lock; | ||
| 155 | #endif | ||
| 156 | } rwlock_t; | ||
| 157 | |||
| 158 | #define RW_LOCK_UNLOCKED (rwlock_t) { 0 } | ||
| 159 | |||
| 160 | #define rwlock_init(lp) do { *(lp)= RW_LOCK_UNLOCKED; } while(0) | ||
| 161 | |||
| 162 | |||
| 163 | /* Sort of like atomic_t's on Sparc, but even more clever. | ||
| 164 | * | 68 | * |
| 165 | * ------------------------------------ | 69 | * ------------------------------------ |
| 166 | * | 24-bit counter | wlock | rwlock_t | 70 | * | 24-bit counter | wlock | raw_rwlock_t |
| 167 | * ------------------------------------ | 71 | * ------------------------------------ |
| 168 | * 31 8 7 0 | 72 | * 31 8 7 0 |
| 169 | * | 73 | * |
| @@ -174,9 +78,9 @@ typedef struct { | |||
| 174 | * | 78 | * |
| 175 | * Unfortunately this scheme limits us to ~16,000,000 cpus. | 79 | * Unfortunately this scheme limits us to ~16,000,000 cpus. |
| 176 | */ | 80 | */ |
| 177 | extern __inline__ void _read_lock(rwlock_t *rw) | 81 | extern __inline__ void __read_lock(raw_rwlock_t *rw) |
| 178 | { | 82 | { |
| 179 | register rwlock_t *lp asm("g1"); | 83 | register raw_rwlock_t *lp asm("g1"); |
| 180 | lp = rw; | 84 | lp = rw; |
| 181 | __asm__ __volatile__( | 85 | __asm__ __volatile__( |
| 182 | "mov %%o7, %%g4\n\t" | 86 | "mov %%o7, %%g4\n\t" |
| @@ -187,16 +91,16 @@ extern __inline__ void _read_lock(rwlock_t *rw) | |||
| 187 | : "g2", "g4", "memory", "cc"); | 91 | : "g2", "g4", "memory", "cc"); |
| 188 | } | 92 | } |
| 189 | 93 | ||
| 190 | #define _raw_read_lock(lock) \ | 94 | #define __raw_read_lock(lock) \ |
| 191 | do { unsigned long flags; \ | 95 | do { unsigned long flags; \ |
| 192 | local_irq_save(flags); \ | 96 | local_irq_save(flags); \ |
| 193 | _read_lock(lock); \ | 97 | __raw_read_lock(lock); \ |
| 194 | local_irq_restore(flags); \ | 98 | local_irq_restore(flags); \ |
| 195 | } while(0) | 99 | } while(0) |
| 196 | 100 | ||
| 197 | extern __inline__ void _read_unlock(rwlock_t *rw) | 101 | extern __inline__ void __read_unlock(raw_rwlock_t *rw) |
| 198 | { | 102 | { |
| 199 | register rwlock_t *lp asm("g1"); | 103 | register raw_rwlock_t *lp asm("g1"); |
| 200 | lp = rw; | 104 | lp = rw; |
| 201 | __asm__ __volatile__( | 105 | __asm__ __volatile__( |
| 202 | "mov %%o7, %%g4\n\t" | 106 | "mov %%o7, %%g4\n\t" |
| @@ -207,16 +111,16 @@ extern __inline__ void _read_unlock(rwlock_t *rw) | |||
| 207 | : "g2", "g4", "memory", "cc"); | 111 | : "g2", "g4", "memory", "cc"); |
| 208 | } | 112 | } |
| 209 | 113 | ||
| 210 | #define _raw_read_unlock(lock) \ | 114 | #define __raw_read_unlock(lock) \ |
| 211 | do { unsigned long flags; \ | 115 | do { unsigned long flags; \ |
| 212 | local_irq_save(flags); \ | 116 | local_irq_save(flags); \ |
| 213 | _read_unlock(lock); \ | 117 | __raw_read_unlock(lock); \ |
| 214 | local_irq_restore(flags); \ | 118 | local_irq_restore(flags); \ |
| 215 | } while(0) | 119 | } while(0) |
| 216 | 120 | ||
| 217 | extern __inline__ void _raw_write_lock(rwlock_t *rw) | 121 | extern __inline__ void __raw_write_lock(raw_rwlock_t *rw) |
| 218 | { | 122 | { |
| 219 | register rwlock_t *lp asm("g1"); | 123 | register raw_rwlock_t *lp asm("g1"); |
| 220 | lp = rw; | 124 | lp = rw; |
| 221 | __asm__ __volatile__( | 125 | __asm__ __volatile__( |
| 222 | "mov %%o7, %%g4\n\t" | 126 | "mov %%o7, %%g4\n\t" |
| @@ -227,11 +131,9 @@ extern __inline__ void _raw_write_lock(rwlock_t *rw) | |||
| 227 | : "g2", "g4", "memory", "cc"); | 131 | : "g2", "g4", "memory", "cc"); |
| 228 | } | 132 | } |
| 229 | 133 | ||
| 230 | #define _raw_write_unlock(rw) do { (rw)->lock = 0; } while(0) | 134 | #define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0) |
| 231 | |||
| 232 | #endif /* CONFIG_DEBUG_SPINLOCK */ | ||
| 233 | 135 | ||
| 234 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | 136 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
| 235 | 137 | ||
| 236 | #endif /* !(__ASSEMBLY__) */ | 138 | #endif /* !(__ASSEMBLY__) */ |
| 237 | 139 | ||
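The lock-word diagram in the sparc comment reads most easily as field
extraction: bits 31..8 hold the reader count and bits 7..0 the write-lock
byte. Purely illustrative decoding (the variable names are made up):

	unsigned int word    = rw->lock;
	unsigned int readers = word >> 8;	/* 24-bit counter, bits 31..8 */
	unsigned int wlock   = word & 0xffU;	/* write-lock byte, bits 7..0 */

This is also where the "~16,000,000 cpus" limit in the comment comes from:
2^24 concurrent readers is the counter's ceiling.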
diff --git a/include/asm-sparc/spinlock_types.h b/include/asm-sparc/spinlock_types.h new file mode 100644 index 000000000000..0a0fb116c4ec --- /dev/null +++ b/include/asm-sparc/spinlock_types.h | |||
| @@ -0,0 +1,20 @@ | |||
| 1 | #ifndef __SPARC_SPINLOCK_TYPES_H | ||
| 2 | #define __SPARC_SPINLOCK_TYPES_H | ||
| 3 | |||
| 4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
| 5 | # error "please don't include this file directly" | ||
| 6 | #endif | ||
| 7 | |||
| 8 | typedef struct { | ||
| 9 | unsigned char lock; | ||
| 10 | } raw_spinlock_t; | ||
| 11 | |||
| 12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | ||
| 13 | |||
| 14 | typedef struct { | ||
| 15 | volatile unsigned int lock; | ||
| 16 | } raw_rwlock_t; | ||
| 17 | |||
| 18 | #define __RAW_RW_LOCK_UNLOCKED { 0 } | ||
| 19 | |||
| 20 | #endif | ||
diff --git a/include/asm-sparc64/spinlock.h b/include/asm-sparc64/spinlock.h index a02c4370eb42..ec85d12d73b9 100644 --- a/include/asm-sparc64/spinlock.h +++ b/include/asm-sparc64/spinlock.h | |||
| @@ -29,24 +29,13 @@ | |||
| 29 | * must be pre-V9 branches. | 29 | * must be pre-V9 branches. |
| 30 | */ | 30 | */ |
| 31 | 31 | ||
| 32 | #ifndef CONFIG_DEBUG_SPINLOCK | 32 | #define __raw_spin_is_locked(lp) ((lp)->lock != 0) |
| 33 | 33 | ||
| 34 | typedef struct { | 34 | #define __raw_spin_unlock_wait(lp) \ |
| 35 | volatile unsigned char lock; | 35 | do { rmb(); \ |
| 36 | #ifdef CONFIG_PREEMPT | 36 | } while((lp)->lock) |
| 37 | unsigned int break_lock; | ||
| 38 | #endif | ||
| 39 | } spinlock_t; | ||
| 40 | #define SPIN_LOCK_UNLOCKED (spinlock_t) {0,} | ||
| 41 | 37 | ||
| 42 | #define spin_lock_init(lp) do { *(lp)= SPIN_LOCK_UNLOCKED; } while(0) | 38 | static inline void __raw_spin_lock(raw_spinlock_t *lock) |
| 43 | #define spin_is_locked(lp) ((lp)->lock != 0) | ||
| 44 | |||
| 45 | #define spin_unlock_wait(lp) \ | ||
| 46 | do { rmb(); \ | ||
| 47 | } while((lp)->lock) | ||
| 48 | |||
| 49 | static inline void _raw_spin_lock(spinlock_t *lock) | ||
| 50 | { | 39 | { |
| 51 | unsigned long tmp; | 40 | unsigned long tmp; |
| 52 | 41 | ||
| @@ -67,7 +56,7 @@ static inline void _raw_spin_lock(spinlock_t *lock) | |||
| 67 | : "memory"); | 56 | : "memory"); |
| 68 | } | 57 | } |
| 69 | 58 | ||
| 70 | static inline int _raw_spin_trylock(spinlock_t *lock) | 59 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) |
| 71 | { | 60 | { |
| 72 | unsigned long result; | 61 | unsigned long result; |
| 73 | 62 | ||
| @@ -81,7 +70,7 @@ static inline int _raw_spin_trylock(spinlock_t *lock) | |||
| 81 | return (result == 0UL); | 70 | return (result == 0UL); |
| 82 | } | 71 | } |
| 83 | 72 | ||
| 84 | static inline void _raw_spin_unlock(spinlock_t *lock) | 73 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
| 85 | { | 74 | { |
| 86 | __asm__ __volatile__( | 75 | __asm__ __volatile__( |
| 87 | " membar #StoreStore | #LoadStore\n" | 76 | " membar #StoreStore | #LoadStore\n" |
| @@ -91,7 +80,7 @@ static inline void _raw_spin_unlock(spinlock_t *lock) | |||
| 91 | : "memory"); | 80 | : "memory"); |
| 92 | } | 81 | } |
| 93 | 82 | ||
| 94 | static inline void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags) | 83 | static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) |
| 95 | { | 84 | { |
| 96 | unsigned long tmp1, tmp2; | 85 | unsigned long tmp1, tmp2; |
| 97 | 86 | ||
| @@ -115,51 +104,9 @@ static inline void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags) | |||
| 115 | : "memory"); | 104 | : "memory"); |
| 116 | } | 105 | } |
| 117 | 106 | ||
| 118 | #else /* !(CONFIG_DEBUG_SPINLOCK) */ | ||
| 119 | |||
| 120 | typedef struct { | ||
| 121 | volatile unsigned char lock; | ||
| 122 | unsigned int owner_pc, owner_cpu; | ||
| 123 | #ifdef CONFIG_PREEMPT | ||
| 124 | unsigned int break_lock; | ||
| 125 | #endif | ||
| 126 | } spinlock_t; | ||
| 127 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0, 0, 0xff } | ||
| 128 | #define spin_lock_init(lp) do { *(lp)= SPIN_LOCK_UNLOCKED; } while(0) | ||
| 129 | #define spin_is_locked(__lock) ((__lock)->lock != 0) | ||
| 130 | #define spin_unlock_wait(__lock) \ | ||
| 131 | do { \ | ||
| 132 | rmb(); \ | ||
| 133 | } while((__lock)->lock) | ||
| 134 | |||
| 135 | extern void _do_spin_lock(spinlock_t *lock, char *str, unsigned long caller); | ||
| 136 | extern void _do_spin_unlock(spinlock_t *lock); | ||
| 137 | extern int _do_spin_trylock(spinlock_t *lock, unsigned long caller); | ||
| 138 | |||
| 139 | #define _raw_spin_trylock(lp) \ | ||
| 140 | _do_spin_trylock(lp, (unsigned long) __builtin_return_address(0)) | ||
| 141 | #define _raw_spin_lock(lock) \ | ||
| 142 | _do_spin_lock(lock, "spin_lock", \ | ||
| 143 | (unsigned long) __builtin_return_address(0)) | ||
| 144 | #define _raw_spin_unlock(lock) _do_spin_unlock(lock) | ||
| 145 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | ||
| 146 | |||
| 147 | #endif /* CONFIG_DEBUG_SPINLOCK */ | ||
| 148 | |||
| 149 | /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */ | 107 | /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */ |
| 150 | 108 | ||
| 151 | #ifndef CONFIG_DEBUG_SPINLOCK | 109 | static void inline __read_lock(raw_rwlock_t *lock) |
| 152 | |||
| 153 | typedef struct { | ||
| 154 | volatile unsigned int lock; | ||
| 155 | #ifdef CONFIG_PREEMPT | ||
| 156 | unsigned int break_lock; | ||
| 157 | #endif | ||
| 158 | } rwlock_t; | ||
| 159 | #define RW_LOCK_UNLOCKED (rwlock_t) {0,} | ||
| 160 | #define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0) | ||
| 161 | |||
| 162 | static void inline __read_lock(rwlock_t *lock) | ||
| 163 | { | 110 | { |
| 164 | unsigned long tmp1, tmp2; | 111 | unsigned long tmp1, tmp2; |
| 165 | 112 | ||
| @@ -184,7 +131,7 @@ static void inline __read_lock(rwlock_t *lock) | |||
| 184 | : "memory"); | 131 | : "memory"); |
| 185 | } | 132 | } |
| 186 | 133 | ||
| 187 | static void inline __read_unlock(rwlock_t *lock) | 134 | static void inline __read_unlock(raw_rwlock_t *lock) |
| 188 | { | 135 | { |
| 189 | unsigned long tmp1, tmp2; | 136 | unsigned long tmp1, tmp2; |
| 190 | 137 | ||
| @@ -201,7 +148,7 @@ static void inline __read_unlock(rwlock_t *lock) | |||
| 201 | : "memory"); | 148 | : "memory"); |
| 202 | } | 149 | } |
| 203 | 150 | ||
| 204 | static void inline __write_lock(rwlock_t *lock) | 151 | static void inline __write_lock(raw_rwlock_t *lock) |
| 205 | { | 152 | { |
| 206 | unsigned long mask, tmp1, tmp2; | 153 | unsigned long mask, tmp1, tmp2; |
| 207 | 154 | ||
| @@ -228,7 +175,7 @@ static void inline __write_lock(rwlock_t *lock) | |||
| 228 | : "memory"); | 175 | : "memory"); |
| 229 | } | 176 | } |
| 230 | 177 | ||
| 231 | static void inline __write_unlock(rwlock_t *lock) | 178 | static void inline __write_unlock(raw_rwlock_t *lock) |
| 232 | { | 179 | { |
| 233 | __asm__ __volatile__( | 180 | __asm__ __volatile__( |
| 234 | " membar #LoadStore | #StoreStore\n" | 181 | " membar #LoadStore | #StoreStore\n" |
| @@ -238,7 +185,7 @@ static void inline __write_unlock(rwlock_t *lock) | |||
| 238 | : "memory"); | 185 | : "memory"); |
| 239 | } | 186 | } |
| 240 | 187 | ||
| 241 | static int inline __write_trylock(rwlock_t *lock) | 188 | static int inline __write_trylock(raw_rwlock_t *lock) |
| 242 | { | 189 | { |
| 243 | unsigned long mask, tmp1, tmp2, result; | 190 | unsigned long mask, tmp1, tmp2, result; |
| 244 | 191 | ||
| @@ -263,78 +210,15 @@ static int inline __write_trylock(rwlock_t *lock) | |||
| 263 | return result; | 210 | return result; |
| 264 | } | 211 | } |
| 265 | 212 | ||
| 266 | #define _raw_read_lock(p) __read_lock(p) | 213 | #define __raw_read_lock(p) __read_lock(p) |
| 267 | #define _raw_read_unlock(p) __read_unlock(p) | 214 | #define __raw_read_unlock(p) __read_unlock(p) |
| 268 | #define _raw_write_lock(p) __write_lock(p) | 215 | #define __raw_write_lock(p) __write_lock(p) |
| 269 | #define _raw_write_unlock(p) __write_unlock(p) | 216 | #define __raw_write_unlock(p) __write_unlock(p) |
| 270 | #define _raw_write_trylock(p) __write_trylock(p) | 217 | #define __raw_write_trylock(p) __write_trylock(p) |
| 271 | 218 | ||
| 272 | #else /* !(CONFIG_DEBUG_SPINLOCK) */ | 219 | #define __raw_read_trylock(lock) generic__raw_read_trylock(lock) |
| 273 | 220 | #define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) | |
| 274 | typedef struct { | 221 | #define __raw_write_can_lock(rw) (!(rw)->lock) |
| 275 | volatile unsigned long lock; | ||
| 276 | unsigned int writer_pc, writer_cpu; | ||
| 277 | unsigned int reader_pc[NR_CPUS]; | ||
| 278 | #ifdef CONFIG_PREEMPT | ||
| 279 | unsigned int break_lock; | ||
| 280 | #endif | ||
| 281 | } rwlock_t; | ||
| 282 | #define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0, 0xff, { } } | ||
| 283 | #define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0) | ||
| 284 | |||
| 285 | extern void _do_read_lock(rwlock_t *rw, char *str, unsigned long caller); | ||
| 286 | extern void _do_read_unlock(rwlock_t *rw, char *str, unsigned long caller); | ||
| 287 | extern void _do_write_lock(rwlock_t *rw, char *str, unsigned long caller); | ||
| 288 | extern void _do_write_unlock(rwlock_t *rw, unsigned long caller); | ||
| 289 | extern int _do_write_trylock(rwlock_t *rw, char *str, unsigned long caller); | ||
| 290 | |||
| 291 | #define _raw_read_lock(lock) \ | ||
| 292 | do { unsigned long flags; \ | ||
| 293 | local_irq_save(flags); \ | ||
| 294 | _do_read_lock(lock, "read_lock", \ | ||
| 295 | (unsigned long) __builtin_return_address(0)); \ | ||
| 296 | local_irq_restore(flags); \ | ||
| 297 | } while(0) | ||
| 298 | |||
| 299 | #define _raw_read_unlock(lock) \ | ||
| 300 | do { unsigned long flags; \ | ||
| 301 | local_irq_save(flags); \ | ||
| 302 | _do_read_unlock(lock, "read_unlock", \ | ||
| 303 | (unsigned long) __builtin_return_address(0)); \ | ||
| 304 | local_irq_restore(flags); \ | ||
| 305 | } while(0) | ||
| 306 | |||
| 307 | #define _raw_write_lock(lock) \ | ||
| 308 | do { unsigned long flags; \ | ||
| 309 | local_irq_save(flags); \ | ||
| 310 | _do_write_lock(lock, "write_lock", \ | ||
| 311 | (unsigned long) __builtin_return_address(0)); \ | ||
| 312 | local_irq_restore(flags); \ | ||
| 313 | } while(0) | ||
| 314 | |||
| 315 | #define _raw_write_unlock(lock) \ | ||
| 316 | do { unsigned long flags; \ | ||
| 317 | local_irq_save(flags); \ | ||
| 318 | _do_write_unlock(lock, \ | ||
| 319 | (unsigned long) __builtin_return_address(0)); \ | ||
| 320 | local_irq_restore(flags); \ | ||
| 321 | } while(0) | ||
| 322 | |||
| 323 | #define _raw_write_trylock(lock) \ | ||
| 324 | ({ unsigned long flags; \ | ||
| 325 | int val; \ | ||
| 326 | local_irq_save(flags); \ | ||
| 327 | val = _do_write_trylock(lock, "write_trylock", \ | ||
| 328 | (unsigned long) __builtin_return_address(0)); \ | ||
| 329 | local_irq_restore(flags); \ | ||
| 330 | val; \ | ||
| 331 | }) | ||
| 332 | |||
| 333 | #endif /* CONFIG_DEBUG_SPINLOCK */ | ||
| 334 | |||
| 335 | #define _raw_read_trylock(lock) generic_raw_read_trylock(lock) | ||
| 336 | #define read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) | ||
| 337 | #define write_can_lock(rw) (!(rw)->lock) | ||
| 338 | 222 | ||
| 339 | #endif /* !(__ASSEMBLY__) */ | 223 | #endif /* !(__ASSEMBLY__) */ |
| 340 | 224 | ||
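In C terms the sparc64 fast path is a byte test-and-set (ldstub) followed by
a spin on the plain loaded value, with the membar instructions supplying the
acquire/release ordering. A rough equivalent, assuming a hypothetical
test_and_set_byte() primitive -- a sketch of the structure, not the real
implementation:

	static void sketch_spin_lock(volatile unsigned char *lock)
	{
		while (test_and_set_byte(lock))	/* assumed atomic TAS */
			while (*lock)		/* spin read-only, cache-local */
				cpu_relax();
	}

Spinning with ordinary loads between test-and-set attempts keeps the
contended loop out of the coherence traffic until the lock looks free.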
diff --git a/include/asm-sparc64/spinlock_types.h b/include/asm-sparc64/spinlock_types.h new file mode 100644 index 000000000000..e128112a0d7c --- /dev/null +++ b/include/asm-sparc64/spinlock_types.h | |||
| @@ -0,0 +1,20 @@ | |||
| 1 | #ifndef __SPARC64_SPINLOCK_TYPES_H | ||
| 2 | #define __SPARC64_SPINLOCK_TYPES_H | ||
| 3 | |||
| 4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
| 5 | # error "please don't include this file directly" | ||
| 6 | #endif | ||
| 7 | |||
| 8 | typedef struct { | ||
| 9 | volatile unsigned char lock; | ||
| 10 | } raw_spinlock_t; | ||
| 11 | |||
| 12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | ||
| 13 | |||
| 14 | typedef struct { | ||
| 15 | volatile unsigned int lock; | ||
| 16 | } raw_rwlock_t; | ||
| 17 | |||
| 18 | #define __RAW_RW_LOCK_UNLOCKED { 0 } | ||
| 19 | |||
| 20 | #endif | ||
diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h index 6c813eb521f3..f7574196424e 100644 --- a/include/asm-x86_64/proto.h +++ b/include/asm-x86_64/proto.h | |||
| @@ -8,7 +8,6 @@ | |||
| 8 | struct cpuinfo_x86; | 8 | struct cpuinfo_x86; |
| 9 | struct pt_regs; | 9 | struct pt_regs; |
| 10 | 10 | ||
| 11 | extern void get_cpu_vendor(struct cpuinfo_x86*); | ||
| 12 | extern void start_kernel(void); | 11 | extern void start_kernel(void); |
| 13 | extern void pda_init(int); | 12 | extern void pda_init(int); |
| 14 | 13 | ||
diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h index 5aeb57a3baad..69636831ad2f 100644 --- a/include/asm-x86_64/spinlock.h +++ b/include/asm-x86_64/spinlock.h | |||
| @@ -6,47 +6,21 @@ | |||
| 6 | #include <asm/page.h> | 6 | #include <asm/page.h> |
| 7 | #include <linux/config.h> | 7 | #include <linux/config.h> |
| 8 | 8 | ||
| 9 | extern int printk(const char * fmt, ...) | ||
| 10 | __attribute__ ((format (printf, 1, 2))); | ||
| 11 | |||
| 12 | /* | 9 | /* |
| 13 | * Your basic SMP spinlocks, allowing only a single CPU anywhere | 10 | * Your basic SMP spinlocks, allowing only a single CPU anywhere |
| 14 | */ | 11 | * |
| 15 | |||
| 16 | typedef struct { | ||
| 17 | volatile unsigned int lock; | ||
| 18 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 19 | unsigned magic; | ||
| 20 | #endif | ||
| 21 | #ifdef CONFIG_PREEMPT | ||
| 22 | unsigned int break_lock; | ||
| 23 | #endif | ||
| 24 | } spinlock_t; | ||
| 25 | |||
| 26 | #define SPINLOCK_MAGIC 0xdead4ead | ||
| 27 | |||
| 28 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 29 | #define SPINLOCK_MAGIC_INIT , SPINLOCK_MAGIC | ||
| 30 | #else | ||
| 31 | #define SPINLOCK_MAGIC_INIT /* */ | ||
| 32 | #endif | ||
| 33 | |||
| 34 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT } | ||
| 35 | |||
| 36 | #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0) | ||
| 37 | |||
| 38 | /* | ||
| 39 | * Simple spin lock operations. There are two variants, one clears IRQ's | 12 | * Simple spin lock operations. There are two variants, one clears IRQ's |
| 40 | * on the local processor, one does not. | 13 | * on the local processor, one does not. |
| 41 | * | 14 | * |
| 42 | * We make no fairness assumptions. They have a cost. | 15 | * We make no fairness assumptions. They have a cost. |
| 16 | * | ||
| 17 | * (the type definitions are in asm/spinlock_types.h) | ||
| 43 | */ | 18 | */ |
| 44 | 19 | ||
| 45 | #define spin_is_locked(x) (*(volatile signed char *)(&(x)->lock) <= 0) | 20 | #define __raw_spin_is_locked(x) \ |
| 46 | #define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x)) | 21 | (*(volatile signed char *)(&(x)->slock) <= 0) |
| 47 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | ||
| 48 | 22 | ||
| 49 | #define spin_lock_string \ | 23 | #define __raw_spin_lock_string \ |
| 50 | "\n1:\t" \ | 24 | "\n1:\t" \ |
| 51 | "lock ; decb %0\n\t" \ | 25 | "lock ; decb %0\n\t" \ |
| 52 | "js 2f\n" \ | 26 | "js 2f\n" \ |
| @@ -58,74 +32,40 @@ typedef struct { | |||
| 58 | "jmp 1b\n" \ | 32 | "jmp 1b\n" \ |
| 59 | LOCK_SECTION_END | 33 | LOCK_SECTION_END |
| 60 | 34 | ||
| 61 | /* | 35 | #define __raw_spin_unlock_string \ |
| 62 | * This works. Despite all the confusion. | ||
| 63 | * (except on PPro SMP or if we are using OOSTORE) | ||
| 64 | * (PPro errata 66, 92) | ||
| 65 | */ | ||
| 66 | |||
| 67 | #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE) | ||
| 68 | |||
| 69 | #define spin_unlock_string \ | ||
| 70 | "movb $1,%0" \ | 36 | "movb $1,%0" \ |
| 71 | :"=m" (lock->lock) : : "memory" | 37 | :"=m" (lock->slock) : : "memory" |
| 72 | |||
| 73 | |||
| 74 | static inline void _raw_spin_unlock(spinlock_t *lock) | ||
| 75 | { | ||
| 76 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 77 | BUG_ON(lock->magic != SPINLOCK_MAGIC); | ||
| 78 | assert_spin_locked(lock); | ||
| 79 | #endif | ||
| 80 | __asm__ __volatile__( | ||
| 81 | spin_unlock_string | ||
| 82 | ); | ||
| 83 | } | ||
| 84 | |||
| 85 | #else | ||
| 86 | |||
| 87 | #define spin_unlock_string \ | ||
| 88 | "xchgb %b0, %1" \ | ||
| 89 | :"=q" (oldval), "=m" (lock->lock) \ | ||
| 90 | :"0" (oldval) : "memory" | ||
| 91 | 38 | ||
| 92 | static inline void _raw_spin_unlock(spinlock_t *lock) | 39 | static inline void __raw_spin_lock(raw_spinlock_t *lock) |
| 93 | { | 40 | { |
| 94 | char oldval = 1; | ||
| 95 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 96 | BUG_ON(lock->magic != SPINLOCK_MAGIC); | ||
| 97 | assert_spin_locked(lock); | ||
| 98 | #endif | ||
| 99 | __asm__ __volatile__( | 41 | __asm__ __volatile__( |
| 100 | spin_unlock_string | 42 | __raw_spin_lock_string |
| 101 | ); | 43 | :"=m" (lock->slock) : : "memory"); |
| 102 | } | 44 | } |
| 103 | 45 | ||
| 104 | #endif | 46 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
| 105 | 47 | ||
| 106 | static inline int _raw_spin_trylock(spinlock_t *lock) | 48 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) |
| 107 | { | 49 | { |
| 108 | char oldval; | 50 | char oldval; |
| 51 | |||
| 109 | __asm__ __volatile__( | 52 | __asm__ __volatile__( |
| 110 | "xchgb %b0,%1" | 53 | "xchgb %b0,%1" |
| 111 | :"=q" (oldval), "=m" (lock->lock) | 54 | :"=q" (oldval), "=m" (lock->slock) |
| 112 | :"0" (0) : "memory"); | 55 | :"0" (0) : "memory"); |
| 56 | |||
| 113 | return oldval > 0; | 57 | return oldval > 0; |
| 114 | } | 58 | } |
| 115 | 59 | ||
| 116 | static inline void _raw_spin_lock(spinlock_t *lock) | 60 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
| 117 | { | 61 | { |
| 118 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 119 | if (lock->magic != SPINLOCK_MAGIC) { | ||
| 120 | printk("eip: %p\n", __builtin_return_address(0)); | ||
| 121 | BUG(); | ||
| 122 | } | ||
| 123 | #endif | ||
| 124 | __asm__ __volatile__( | 62 | __asm__ __volatile__( |
| 125 | spin_lock_string | 63 | __raw_spin_unlock_string |
| 126 | :"=m" (lock->lock) : : "memory"); | 64 | ); |
| 127 | } | 65 | } |
| 128 | 66 | ||
| 67 | #define __raw_spin_unlock_wait(lock) \ | ||
| 68 | do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) | ||
| 129 | 69 | ||
| 130 | /* | 70 | /* |
| 131 | * Read-write spinlocks, allowing multiple readers | 71 | * Read-write spinlocks, allowing multiple readers |
| @@ -136,33 +76,7 @@ static inline void _raw_spin_lock(spinlock_t *lock) | |||
| 136 | * can "mix" irq-safe locks - any writer needs to get a | 76 | * can "mix" irq-safe locks - any writer needs to get a |
| 137 | * irq-safe write-lock, but readers can get non-irqsafe | 77 | * irq-safe write-lock, but readers can get non-irqsafe |
| 138 | * read-locks. | 78 | * read-locks. |
| 139 | */ | 79 | * |
| 140 | typedef struct { | ||
| 141 | volatile unsigned int lock; | ||
| 142 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 143 | unsigned magic; | ||
| 144 | #endif | ||
| 145 | #ifdef CONFIG_PREEMPT | ||
| 146 | unsigned int break_lock; | ||
| 147 | #endif | ||
| 148 | } rwlock_t; | ||
| 149 | |||
| 150 | #define RWLOCK_MAGIC 0xdeaf1eed | ||
| 151 | |||
| 152 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 153 | #define RWLOCK_MAGIC_INIT , RWLOCK_MAGIC | ||
| 154 | #else | ||
| 155 | #define RWLOCK_MAGIC_INIT /* */ | ||
| 156 | #endif | ||
| 157 | |||
| 158 | #define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT } | ||
| 159 | |||
| 160 | #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0) | ||
| 161 | |||
| 162 | #define read_can_lock(x) ((int)(x)->lock > 0) | ||
| 163 | #define write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) | ||
| 164 | |||
| 165 | /* | ||
| 166 | * On x86, we implement read-write locks as a 32-bit counter | 80 | * On x86, we implement read-write locks as a 32-bit counter |
| 167 | * with the high bit (sign) being the "contended" bit. | 81 | * with the high bit (sign) being the "contended" bit. |
| 168 | * | 82 | * |
| @@ -170,29 +84,24 @@ typedef struct { | |||
| 170 | * | 84 | * |
| 171 | * Changed to use the same technique as rw semaphores. See | 85 | * Changed to use the same technique as rw semaphores. See |
| 172 | * semaphore.h for details. -ben | 86 | * semaphore.h for details. -ben |
| 87 | * | ||
| 88 | * the helpers are in arch/i386/kernel/semaphore.c | ||
| 173 | */ | 89 | */ |
| 174 | /* the spinlock helpers are in arch/i386/kernel/semaphore.c */ | ||
| 175 | 90 | ||
| 176 | static inline void _raw_read_lock(rwlock_t *rw) | 91 | #define __raw_read_can_lock(x) ((int)(x)->lock > 0) |
| 92 | #define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) | ||
| 93 | |||
| 94 | static inline void __raw_read_lock(raw_rwlock_t *rw) | ||
| 177 | { | 95 | { |
| 178 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 179 | BUG_ON(rw->magic != RWLOCK_MAGIC); | ||
| 180 | #endif | ||
| 181 | __build_read_lock(rw, "__read_lock_failed"); | 96 | __build_read_lock(rw, "__read_lock_failed"); |
| 182 | } | 97 | } |
| 183 | 98 | ||
| 184 | static inline void _raw_write_lock(rwlock_t *rw) | 99 | static inline void __raw_write_lock(raw_rwlock_t *rw) |
| 185 | { | 100 | { |
| 186 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 187 | BUG_ON(rw->magic != RWLOCK_MAGIC); | ||
| 188 | #endif | ||
| 189 | __build_write_lock(rw, "__write_lock_failed"); | 101 | __build_write_lock(rw, "__write_lock_failed"); |
| 190 | } | 102 | } |
| 191 | 103 | ||
| 192 | #define _raw_read_unlock(rw) asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory") | 104 | static inline int __raw_read_trylock(raw_rwlock_t *lock) |
| 193 | #define _raw_write_unlock(rw) asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory") | ||
| 194 | |||
| 195 | static inline int _raw_read_trylock(rwlock_t *lock) | ||
| 196 | { | 105 | { |
| 197 | atomic_t *count = (atomic_t *)lock; | 106 | atomic_t *count = (atomic_t *)lock; |
| 198 | atomic_dec(count); | 107 | atomic_dec(count); |
| @@ -202,7 +111,7 @@ static inline int _raw_read_trylock(rwlock_t *lock) | |||
| 202 | return 0; | 111 | return 0; |
| 203 | } | 112 | } |
| 204 | 113 | ||
| 205 | static inline int _raw_write_trylock(rwlock_t *lock) | 114 | static inline int __raw_write_trylock(raw_rwlock_t *lock) |
| 206 | { | 115 | { |
| 207 | atomic_t *count = (atomic_t *)lock; | 116 | atomic_t *count = (atomic_t *)lock; |
| 208 | if (atomic_sub_and_test(RW_LOCK_BIAS, count)) | 117 | if (atomic_sub_and_test(RW_LOCK_BIAS, count)) |
| @@ -211,4 +120,15 @@ static inline int _raw_write_trylock(rwlock_t *lock) | |||
| 211 | return 0; | 120 | return 0; |
| 212 | } | 121 | } |
| 213 | 122 | ||
| 123 | static inline void __raw_read_unlock(raw_rwlock_t *rw) | ||
| 124 | { | ||
| 125 | asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory"); | ||
| 126 | } | ||
| 127 | |||
| 128 | static inline void __raw_write_unlock(raw_rwlock_t *rw) | ||
| 129 | { | ||
| 130 | asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0" | ||
| 131 | : "=m" (rw->lock) : : "memory"); | ||
| 132 | } | ||
| 133 | |||
| 214 | #endif /* __ASM_SPINLOCK_H */ | 134 | #endif /* __ASM_SPINLOCK_H */ |
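The x86-64 spinlock keeps the classic byte protocol (1 is free; "lock ; decb"
takes it to 0 on success and negative on contention, hence the <= 0 test in
__raw_spin_is_locked), and the rwlock keeps the biased 32-bit counter shared
with i386. The bias arithmetic is clearest with concrete values, taking
RW_LOCK_BIAS as 0x01000000 -- worked states, for illustration only:

	0x01000000	unlocked (the __RAW_RW_LOCK_UNLOCKED initializer)
	0x00ffffff	one reader   ("lock ; decl" -- result still positive)
	0x00000000	writer held  (bias subtracted in one go)
	< 0		contended    (sign bit set, slow path taken)

Hence __raw_read_can_lock tests for a positive value, __raw_write_can_lock
for the pristine bias, and the unlock paths simply add back what the lock
and trylock paths subtracted.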
diff --git a/include/asm-x86_64/spinlock_types.h b/include/asm-x86_64/spinlock_types.h new file mode 100644 index 000000000000..59efe849f351 --- /dev/null +++ b/include/asm-x86_64/spinlock_types.h | |||
| @@ -0,0 +1,20 @@ | |||
| 1 | #ifndef __ASM_SPINLOCK_TYPES_H | ||
| 2 | #define __ASM_SPINLOCK_TYPES_H | ||
| 3 | |||
| 4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
| 5 | # error "please don't include this file directly" | ||
| 6 | #endif | ||
| 7 | |||
| 8 | typedef struct { | ||
| 9 | volatile unsigned int slock; | ||
| 10 | } raw_spinlock_t; | ||
| 11 | |||
| 12 | #define __RAW_SPIN_LOCK_UNLOCKED { 1 } | ||
| 13 | |||
| 14 | typedef struct { | ||
| 15 | volatile unsigned int lock; | ||
| 16 | } raw_rwlock_t; | ||
| 17 | |||
| 18 | #define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } | ||
| 19 | |||
| 20 | #endif | ||
diff --git a/include/linux/bio.h b/include/linux/bio.h index cdaf03a14a51..6e1c79c8b6bf 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h | |||
| @@ -314,9 +314,8 @@ void zero_fill_bio(struct bio *bio); | |||
| 314 | * bvec_kmap_irq and bvec_kunmap_irq!! | 314 | * bvec_kmap_irq and bvec_kunmap_irq!! |
| 315 | * | 315 | * |
| 316 | * This function MUST be inlined - it plays with the CPU interrupt flags. | 316 | * This function MUST be inlined - it plays with the CPU interrupt flags. |
| 317 | * Hence the `extern inline'. | ||
| 318 | */ | 317 | */ |
| 319 | extern inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags) | 318 | static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags) |
| 320 | { | 319 | { |
| 321 | unsigned long addr; | 320 | unsigned long addr; |
| 322 | 321 | ||
| @@ -332,7 +331,7 @@ extern inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags) | |||
| 332 | return (char *) addr + bvec->bv_offset; | 331 | return (char *) addr + bvec->bv_offset; |
| 333 | } | 332 | } |
| 334 | 333 | ||
| 335 | extern inline void bvec_kunmap_irq(char *buffer, unsigned long *flags) | 334 | static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags) |
| 336 | { | 335 | { |
| 337 | unsigned long ptr = (unsigned long) buffer & PAGE_MASK; | 336 | unsigned long ptr = (unsigned long) buffer & PAGE_MASK; |
| 338 | 337 | ||
| @@ -345,7 +344,7 @@ extern inline void bvec_kunmap_irq(char *buffer, unsigned long *flags) | |||
| 345 | #define bvec_kunmap_irq(buf, flags) do { *(flags) = 0; } while (0) | 344 | #define bvec_kunmap_irq(buf, flags) do { *(flags) = 0; } while (0) |
| 346 | #endif | 345 | #endif |
| 347 | 346 | ||
| 348 | extern inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx, | 347 | static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx, |
| 349 | unsigned long *flags) | 348 | unsigned long *flags) |
| 350 | { | 349 | { |
| 351 | return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags); | 350 | return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags); |
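The extern inline -> static inline conversions here (and in blkdev.h and
reiserfs_fs.h below) are not cosmetic. Under gcc's traditional gnu89
semantics, which is presumably what motivated the change:

	/* static inline: may emit a file-local out-of-line copy when the
	 *                compiler declines to inline -- always links
	 * extern inline: never emits a body; any non-inlined call becomes
	 *                an external reference that must be defined in some
	 *                other translation unit, or the link fails
	 */

so static inline is the only variant that is safe without a matching
out-of-line definition elsewhere.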
diff --git a/include/linux/bit_spinlock.h b/include/linux/bit_spinlock.h new file mode 100644 index 000000000000..6b20af0bbb79 --- /dev/null +++ b/include/linux/bit_spinlock.h | |||
| @@ -0,0 +1,77 @@ | |||
| 1 | #ifndef __LINUX_BIT_SPINLOCK_H | ||
| 2 | #define __LINUX_BIT_SPINLOCK_H | ||
| 3 | |||
| 4 | /* | ||
| 5 | * bit-based spin_lock() | ||
| 6 | * | ||
| 7 | * Don't use this unless you really need to: spin_lock() and spin_unlock() | ||
| 8 | * are significantly faster. | ||
| 9 | */ | ||
| 10 | static inline void bit_spin_lock(int bitnum, unsigned long *addr) | ||
| 11 | { | ||
| 12 | /* | ||
| 13 | * Assuming the lock is uncontended, this never enters | ||
| 14 | * the body of the outer loop. If it is contended, then | ||
| 15 | * within the inner loop a non-atomic test is used to | ||
| 16 | * busywait with less bus contention for a good time to | ||
| 17 | * attempt to acquire the lock bit. | ||
| 18 | */ | ||
| 19 | preempt_disable(); | ||
| 20 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | ||
| 21 | while (test_and_set_bit(bitnum, addr)) { | ||
| 22 | while (test_bit(bitnum, addr)) { | ||
| 23 | preempt_enable(); | ||
| 24 | cpu_relax(); | ||
| 25 | preempt_disable(); | ||
| 26 | } | ||
| 27 | } | ||
| 28 | #endif | ||
| 29 | __acquire(bitlock); | ||
| 30 | } | ||
| 31 | |||
| 32 | /* | ||
| 33 | * Return true if it was acquired | ||
| 34 | */ | ||
| 35 | static inline int bit_spin_trylock(int bitnum, unsigned long *addr) | ||
| 36 | { | ||
| 37 | preempt_disable(); | ||
| 38 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | ||
| 39 | if (test_and_set_bit(bitnum, addr)) { | ||
| 40 | preempt_enable(); | ||
| 41 | return 0; | ||
| 42 | } | ||
| 43 | #endif | ||
| 44 | __acquire(bitlock); | ||
| 45 | return 1; | ||
| 46 | } | ||
| 47 | |||
| 48 | /* | ||
| 49 | * bit-based spin_unlock() | ||
| 50 | */ | ||
| 51 | static inline void bit_spin_unlock(int bitnum, unsigned long *addr) | ||
| 52 | { | ||
| 53 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | ||
| 54 | BUG_ON(!test_bit(bitnum, addr)); | ||
| 55 | smp_mb__before_clear_bit(); | ||
| 56 | clear_bit(bitnum, addr); | ||
| 57 | #endif | ||
| 58 | preempt_enable(); | ||
| 59 | __release(bitlock); | ||
| 60 | } | ||
| 61 | |||
| 62 | /* | ||
| 63 | * Return true if the lock is held. | ||
| 64 | */ | ||
| 65 | static inline int bit_spin_is_locked(int bitnum, unsigned long *addr) | ||
| 66 | { | ||
| 67 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | ||
| 68 | return test_bit(bitnum, addr); | ||
| 69 | #elif defined CONFIG_PREEMPT | ||
| 70 | return preempt_count(); | ||
| 71 | #else | ||
| 72 | return 1; | ||
| 73 | #endif | ||
| 74 | } | ||
| 75 | |||
| 76 | #endif /* __LINUX_BIT_SPINLOCK_H */ | ||
| 77 | |||
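A plausible use of the new bit_spinlock.h -- and presumably why jbd.h below
gains the include -- is to steal one bit of an existing flags word as the
lock instead of embedding a whole spinlock_t. The names here (MY_LOCK_BIT,
struct obj) are illustrative:

	#define MY_LOCK_BIT	3

	struct obj {
		unsigned long flags;	/* bit MY_LOCK_BIT doubles as a lock */
		int payload;
	};

	static void obj_update(struct obj *obj, int v)
	{
		bit_spin_lock(MY_LOCK_BIT, &obj->flags);
		obj->payload = v;	/* critical section */
		bit_spin_unlock(MY_LOCK_BIT, &obj->flags);
	}

As the header's own comment warns, this trades speed for space: the real
spin_lock()/spin_unlock() are faster, so a bit lock only pays off where the
flags word exists anyway.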
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index aefa26fbae8a..efdc9b5bc05c 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
| @@ -728,7 +728,7 @@ static inline unsigned int blksize_bits(unsigned int size) | |||
| 728 | return bits; | 728 | return bits; |
| 729 | } | 729 | } |
| 730 | 730 | ||
| 731 | extern inline unsigned int block_size(struct block_device *bdev) | 731 | static inline unsigned int block_size(struct block_device *bdev) |
| 732 | { | 732 | { |
| 733 | return bdev->bd_block_size; | 733 | return bdev->bd_block_size; |
| 734 | } | 734 | } |
diff --git a/include/linux/chio.h b/include/linux/chio.h index 63035ae67e63..a404c111c937 100644 --- a/include/linux/chio.h +++ b/include/linux/chio.h | |||
| @@ -96,7 +96,7 @@ struct changer_position { | |||
| 96 | */ | 96 | */ |
| 97 | struct changer_element_status { | 97 | struct changer_element_status { |
| 98 | int ces_type; | 98 | int ces_type; |
| 99 | unsigned char *ces_data; | 99 | unsigned char __user *ces_data; |
| 100 | }; | 100 | }; |
| 101 | #define CESTATUS_FULL 0x01 /* full */ | 101 | #define CESTATUS_FULL 0x01 /* full */ |
| 102 | #define CESTATUS_IMPEXP 0x02 /* media was imported (inserted by sysop) */ | 102 | #define CESTATUS_IMPEXP 0x02 /* media was imported (inserted by sysop) */ |
diff --git a/include/linux/dmapool.h b/include/linux/dmapool.h index e60bfdac348d..4932ee5c77f0 100644 --- a/include/linux/dmapool.h +++ b/include/linux/dmapool.h | |||
| @@ -19,7 +19,8 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev, | |||
| 19 | 19 | ||
| 20 | void dma_pool_destroy(struct dma_pool *pool); | 20 | void dma_pool_destroy(struct dma_pool *pool); |
| 21 | 21 | ||
| 22 | void *dma_pool_alloc(struct dma_pool *pool, int mem_flags, dma_addr_t *handle); | 22 | void *dma_pool_alloc(struct dma_pool *pool, unsigned int __nocast mem_flags, |
| 23 | dma_addr_t *handle); | ||
| 23 | 24 | ||
| 24 | void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr); | 25 | void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr); |
| 25 | 26 | ||
diff --git a/include/linux/fs.h b/include/linux/fs.h index 7f61227827d7..e0b77c5af9a0 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
| @@ -1509,8 +1509,6 @@ extern void do_generic_mapping_read(struct address_space *mapping, | |||
| 1509 | loff_t *, read_descriptor_t *, read_actor_t); | 1509 | loff_t *, read_descriptor_t *, read_actor_t); |
| 1510 | extern void | 1510 | extern void |
| 1511 | file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping); | 1511 | file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping); |
| 1512 | extern ssize_t generic_file_direct_IO(int rw, struct kiocb *iocb, | ||
| 1513 | const struct iovec *iov, loff_t offset, unsigned long nr_segs); | ||
| 1514 | extern ssize_t generic_file_readv(struct file *filp, const struct iovec *iov, | 1512 | extern ssize_t generic_file_readv(struct file *filp, const struct iovec *iov, |
| 1515 | unsigned long nr_segs, loff_t *ppos); | 1513 | unsigned long nr_segs, loff_t *ppos); |
| 1516 | ssize_t generic_file_writev(struct file *filp, const struct iovec *iov, | 1514 | ssize_t generic_file_writev(struct file *filp, const struct iovec *iov, |
diff --git a/include/linux/jbd.h b/include/linux/jbd.h index 84321a4cac93..de097269bd7f 100644 --- a/include/linux/jbd.h +++ b/include/linux/jbd.h | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <linux/buffer_head.h> | 28 | #include <linux/buffer_head.h> |
| 29 | #include <linux/journal-head.h> | 29 | #include <linux/journal-head.h> |
| 30 | #include <linux/stddef.h> | 30 | #include <linux/stddef.h> |
| 31 | #include <linux/bit_spinlock.h> | ||
| 31 | #include <asm/semaphore.h> | 32 | #include <asm/semaphore.h> |
| 32 | #endif | 33 | #endif |
| 33 | 34 | ||
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h index d7a2555a886c..6acfdbba734b 100644 --- a/include/linux/jiffies.h +++ b/include/linux/jiffies.h | |||
| @@ -254,23 +254,23 @@ static inline u64 get_jiffies_64(void) | |||
| 254 | */ | 254 | */ |
| 255 | static inline unsigned int jiffies_to_msecs(const unsigned long j) | 255 | static inline unsigned int jiffies_to_msecs(const unsigned long j) |
| 256 | { | 256 | { |
| 257 | #if HZ <= 1000 && !(1000 % HZ) | 257 | #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) |
| 258 | return (1000 / HZ) * j; | 258 | return (MSEC_PER_SEC / HZ) * j; |
| 259 | #elif HZ > 1000 && !(HZ % 1000) | 259 | #elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) |
| 260 | return (j + (HZ / 1000) - 1)/(HZ / 1000); | 260 | return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC); |
| 261 | #else | 261 | #else |
| 262 | return (j * 1000) / HZ; | 262 | return (j * MSEC_PER_SEC) / HZ; |
| 263 | #endif | 263 | #endif |
| 264 | } | 264 | } |
| 265 | 265 | ||
| 266 | static inline unsigned int jiffies_to_usecs(const unsigned long j) | 266 | static inline unsigned int jiffies_to_usecs(const unsigned long j) |
| 267 | { | 267 | { |
| 268 | #if HZ <= 1000000 && !(1000000 % HZ) | 268 | #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ) |
| 269 | return (1000000 / HZ) * j; | 269 | return (USEC_PER_SEC / HZ) * j; |
| 270 | #elif HZ > 1000000 && !(HZ % 1000000) | 270 | #elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC) |
| 271 | return (j + (HZ / 1000000) - 1)/(HZ / 1000000); | 271 | return (j + (HZ / USEC_PER_SEC) - 1)/(HZ / USEC_PER_SEC); |
| 272 | #else | 272 | #else |
| 273 | return (j * 1000000) / HZ; | 273 | return (j * USEC_PER_SEC) / HZ; |
| 274 | #endif | 274 | #endif |
| 275 | } | 275 | } |
| 276 | 276 | ||
| @@ -278,12 +278,12 @@ static inline unsigned long msecs_to_jiffies(const unsigned int m) | |||
| 278 | { | 278 | { |
| 279 | if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET)) | 279 | if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET)) |
| 280 | return MAX_JIFFY_OFFSET; | 280 | return MAX_JIFFY_OFFSET; |
| 281 | #if HZ <= 1000 && !(1000 % HZ) | 281 | #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) |
| 282 | return (m + (1000 / HZ) - 1) / (1000 / HZ); | 282 | return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ); |
| 283 | #elif HZ > 1000 && !(HZ % 1000) | 283 | #elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) |
| 284 | return m * (HZ / 1000); | 284 | return m * (HZ / MSEC_PER_SEC); |
| 285 | #else | 285 | #else |
| 286 | return (m * HZ + 999) / 1000; | 286 | return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC; |
| 287 | #endif | 287 | #endif |
| 288 | } | 288 | } |
| 289 | 289 | ||
| @@ -291,12 +291,12 @@ static inline unsigned long usecs_to_jiffies(const unsigned int u) | |||
| 291 | { | 291 | { |
| 292 | if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET)) | 292 | if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET)) |
| 293 | return MAX_JIFFY_OFFSET; | 293 | return MAX_JIFFY_OFFSET; |
| 294 | #if HZ <= 1000000 && !(1000000 % HZ) | 294 | #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ) |
| 295 | return (u + (1000000 / HZ) - 1) / (1000000 / HZ); | 295 | return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ); |
| 296 | #elif HZ > 1000000 && !(HZ % 1000000) | 296 | #elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC) |
| 297 | return u * (HZ / 1000000); | 297 | return u * (HZ / USEC_PER_SEC); |
| 298 | #else | 298 | #else |
| 299 | return (u * HZ + 999999) / 1000000; | 299 | return (u * HZ + USEC_PER_SEC - 1) / USEC_PER_SEC; |
| 300 | #endif | 300 | #endif |
| 301 | } | 301 | } |
| 302 | 302 | ||
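The MSEC_PER_SEC/USEC_PER_SEC rewrite is purely a readability change; the
arithmetic is identical. How the branches resolve for a few HZ values
(plain arithmetic, nothing kernel-specific assumed):

	HZ = 1000:  1000 % HZ == 0, so jiffies_to_msecs(j) = (1000/1000)*j = j
	HZ =  250:  1000 % HZ == 0, so jiffies_to_msecs(j) = 4*j,
	            and msecs_to_jiffies(m) = (m + 3) / 4, rounding up
	HZ = 1024:  neither case divides, fallback: j * 1000 / HZ

Rounding up in msecs_to_jiffies keeps a requested delay from being silently
shortened by integer truncation.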
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 8081a281fa5e..9c51917b1cce 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h | |||
| @@ -24,7 +24,7 @@ | |||
| 24 | 24 | ||
| 25 | struct radix_tree_root { | 25 | struct radix_tree_root { |
| 26 | unsigned int height; | 26 | unsigned int height; |
| 27 | int gfp_mask; | 27 | unsigned int gfp_mask; |
| 28 | struct radix_tree_node *rnode; | 28 | struct radix_tree_node *rnode; |
| 29 | }; | 29 | }; |
| 30 | 30 | ||
| @@ -50,7 +50,7 @@ void *radix_tree_delete(struct radix_tree_root *, unsigned long); | |||
| 50 | unsigned int | 50 | unsigned int |
| 51 | radix_tree_gang_lookup(struct radix_tree_root *root, void **results, | 51 | radix_tree_gang_lookup(struct radix_tree_root *root, void **results, |
| 52 | unsigned long first_index, unsigned int max_items); | 52 | unsigned long first_index, unsigned int max_items); |
| 53 | int radix_tree_preload(int gfp_mask); | 53 | int radix_tree_preload(unsigned int __nocast gfp_mask); |
| 54 | void radix_tree_init(void); | 54 | void radix_tree_init(void); |
| 55 | void *radix_tree_tag_set(struct radix_tree_root *root, | 55 | void *radix_tree_tag_set(struct radix_tree_root *root, |
| 56 | unsigned long index, int tag); | 56 | unsigned long index, int tag); |
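The unsigned int __nocast annotations appearing here, in dmapool.h above and
in slab.h below are sparse checker markers: implicit conversions to or from
a __nocast-typed value draw a warning, catching callers that hand a plain
int where a gfp mask is expected. Presumably it expands to nothing for a
regular gcc build, along the lines of the usual linux/compiler.h pattern:

	#ifdef __CHECKER__
	# define __nocast	__attribute__((nocast))
	#else
	# define __nocast
	#endif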
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h index 17e458e17e2b..af00b10294cd 100644 --- a/include/linux/reiserfs_fs.h +++ b/include/linux/reiserfs_fs.h | |||
| @@ -2097,7 +2097,7 @@ void reiserfs_free_block(struct reiserfs_transaction_handle *th, struct inode *, | |||
| 2097 | b_blocknr_t, int for_unformatted); | 2097 | b_blocknr_t, int for_unformatted); |
| 2098 | int reiserfs_allocate_blocknrs(reiserfs_blocknr_hint_t *, b_blocknr_t *, int, | 2098 | int reiserfs_allocate_blocknrs(reiserfs_blocknr_hint_t *, b_blocknr_t *, int, |
| 2099 | int); | 2099 | int); |
| 2100 | extern inline int reiserfs_new_form_blocknrs(struct tree_balance *tb, | 2100 | static inline int reiserfs_new_form_blocknrs(struct tree_balance *tb, |
| 2101 | b_blocknr_t * new_blocknrs, | 2101 | b_blocknr_t * new_blocknrs, |
| 2102 | int amount_needed) | 2102 | int amount_needed) |
| 2103 | { | 2103 | { |
| @@ -2113,7 +2113,7 @@ extern inline int reiserfs_new_form_blocknrs(struct tree_balance *tb, | |||
| 2113 | 0); | 2113 | 0); |
| 2114 | } | 2114 | } |
| 2115 | 2115 | ||
| 2116 | extern inline int reiserfs_new_unf_blocknrs(struct reiserfs_transaction_handle | 2116 | static inline int reiserfs_new_unf_blocknrs(struct reiserfs_transaction_handle |
| 2117 | *th, struct inode *inode, | 2117 | *th, struct inode *inode, |
| 2118 | b_blocknr_t * new_blocknrs, | 2118 | b_blocknr_t * new_blocknrs, |
| 2119 | struct path *path, long block) | 2119 | struct path *path, long block) |
| @@ -2130,7 +2130,7 @@ extern inline int reiserfs_new_unf_blocknrs(struct reiserfs_transaction_handle | |||
| 2130 | } | 2130 | } |
| 2131 | 2131 | ||
| 2132 | #ifdef REISERFS_PREALLOCATE | 2132 | #ifdef REISERFS_PREALLOCATE |
| 2133 | extern inline int reiserfs_new_unf_blocknrs2(struct reiserfs_transaction_handle | 2133 | static inline int reiserfs_new_unf_blocknrs2(struct reiserfs_transaction_handle |
| 2134 | *th, struct inode *inode, | 2134 | *th, struct inode *inode, |
| 2135 | b_blocknr_t * new_blocknrs, | 2135 | b_blocknr_t * new_blocknrs, |
| 2136 | struct path *path, long block) | 2136 | struct path *path, long block) |
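
Editor's note: the reiserfs hunks are a pure correctness fix. Under gcc's traditional (GNU89) inline semantics, "extern inline" in a shared header provides an inline body without ever emitting object code, so any call the compiler declines to inline becomes an unresolved symbol at link time. A minimal illustration with made-up function names:

    /* shared.h -- included from several .c files */

    /* Safe: every translation unit gets its own out-of-line fallback, so
     * the symbol always resolves even when the compiler refuses to
     * inline (for instance at -O0). */
    static inline int twice(int x)
    {
        return 2 * x;
    }

    /* Risky under GNU89 semantics: this supplies an inline body only and
     * emits no code, so a non-inlined call to thrice() is an undefined
     * reference unless some .c file also defines it out of line.
     *
     *   extern inline int thrice(int x) { return 3 * x; }
     */
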
diff --git a/include/linux/sched.h b/include/linux/sched.h index c551e6a1447e..4b83cb230006 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -114,6 +114,7 @@ extern unsigned long nr_iowait(void); | |||
| 114 | #define TASK_TRACED 8 | 114 | #define TASK_TRACED 8 |
| 115 | #define EXIT_ZOMBIE 16 | 115 | #define EXIT_ZOMBIE 16 |
| 116 | #define EXIT_DEAD 32 | 116 | #define EXIT_DEAD 32 |
| 117 | #define TASK_NONINTERACTIVE 64 | ||
| 117 | 118 | ||
| 118 | #define __set_task_state(tsk, state_value) \ | 119 | #define __set_task_state(tsk, state_value) \ |
| 119 | do { (tsk)->state = (state_value); } while (0) | 120 | do { (tsk)->state = (state_value); } while (0) |
| @@ -202,6 +203,8 @@ extern int in_sched_functions(unsigned long addr); | |||
| 202 | 203 | ||
| 203 | #define MAX_SCHEDULE_TIMEOUT LONG_MAX | 204 | #define MAX_SCHEDULE_TIMEOUT LONG_MAX |
| 204 | extern signed long FASTCALL(schedule_timeout(signed long timeout)); | 205 | extern signed long FASTCALL(schedule_timeout(signed long timeout)); |
| 206 | extern signed long schedule_timeout_interruptible(signed long timeout); | ||
| 207 | extern signed long schedule_timeout_uninterruptible(signed long timeout); | ||
| 205 | asmlinkage void schedule(void); | 208 | asmlinkage void schedule(void); |
| 206 | 209 | ||
| 207 | struct namespace; | 210 | struct namespace; |
| @@ -782,6 +785,7 @@ struct task_struct { | |||
| 782 | short il_next; | 785 | short il_next; |
| 783 | #endif | 786 | #endif |
| 784 | #ifdef CONFIG_CPUSETS | 787 | #ifdef CONFIG_CPUSETS |
| 788 | short cpuset_sem_nest_depth; | ||
| 785 | struct cpuset *cpuset; | 789 | struct cpuset *cpuset; |
| 786 | nodemask_t mems_allowed; | 790 | nodemask_t mems_allowed; |
| 787 | int cpuset_mems_generation; | 791 | int cpuset_mems_generation; |
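
Editor's note: the two new schedule_timeout_*() declarations fold the usual set-state-then-sleep pair into a single call, so callers cannot forget the state change. A sketch of the expected wrapper bodies (the implementations live in kernel code outside this diff, so treat the exact shape as an assumption):

    signed long schedule_timeout_interruptible(signed long timeout)
    {
        __set_current_state(TASK_INTERRUPTIBLE);
        return schedule_timeout(timeout);
    }

    signed long schedule_timeout_uninterruptible(signed long timeout)
    {
        __set_current_state(TASK_UNINTERRUPTIBLE);
        return schedule_timeout(timeout);
    }
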
diff --git a/include/linux/slab.h b/include/linux/slab.h index 42a6bea58af3..1f356f3bbc64 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
| @@ -118,7 +118,8 @@ extern void kfree(const void *); | |||
| 118 | extern unsigned int ksize(const void *); | 118 | extern unsigned int ksize(const void *); |
| 119 | 119 | ||
| 120 | #ifdef CONFIG_NUMA | 120 | #ifdef CONFIG_NUMA |
| 121 | extern void *kmem_cache_alloc_node(kmem_cache_t *, int flags, int node); | 121 | extern void *kmem_cache_alloc_node(kmem_cache_t *, |
| 122 | unsigned int __nocast flags, int node); | ||
| 122 | extern void *kmalloc_node(size_t size, unsigned int __nocast flags, int node); | 123 | extern void *kmalloc_node(size_t size, unsigned int __nocast flags, int node); |
| 123 | #else | 124 | #else |
| 124 | static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int node) | 125 | static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int node) |
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index d6ba068719b6..cdc99a27840d 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h | |||
| @@ -2,7 +2,48 @@ | |||
| 2 | #define __LINUX_SPINLOCK_H | 2 | #define __LINUX_SPINLOCK_H |
| 3 | 3 | ||
| 4 | /* | 4 | /* |
| 5 | * include/linux/spinlock.h - generic locking declarations | 5 | * include/linux/spinlock.h - generic spinlock/rwlock declarations |
| 6 | * | ||
| 7 | * here's the role of the various spinlock/rwlock related include files: | ||
| 8 | * | ||
| 9 | * on SMP builds: | ||
| 10 | * | ||
| 11 | * asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the | ||
| 12 | * initializers | ||
| 13 | * | ||
| 14 | * linux/spinlock_types.h: | ||
| 15 | * defines the generic type and initializers | ||
| 16 | * | ||
| 17 | * asm/spinlock.h: contains the __raw_spin_*()/etc. lowlevel | ||
| 18 | * implementations, mostly inline assembly code | ||
| 19 | * | ||
| 20 | * (also included on UP-debug builds:) | ||
| 21 | * | ||
| 22 | * linux/spinlock_api_smp.h: | ||
| 23 | * contains the prototypes for the _spin_*() APIs. | ||
| 24 | * | ||
| 25 | * linux/spinlock.h: builds the final spin_*() APIs. | ||
| 26 | * | ||
| 27 | * on UP builds: | ||
| 28 | * | ||
| 29 | * linux/spinlock_type_up.h: | ||
| 30 | * contains the generic, simplified UP spinlock type. | ||
| 31 | * (which is an empty structure on non-debug builds) | ||
| 32 | * | ||
| 33 | * linux/spinlock_types.h: | ||
| 34 | * defines the generic type and initializers | ||
| 35 | * | ||
| 36 | * linux/spinlock_up.h: | ||
| 37 | * contains the __raw_spin_*()/etc. versions for UP | ||

| 38 | * builds. (which are NOPs on non-debug, non-preempt | ||
| 39 | * builds) | ||
| 40 | * | ||
| 41 | * (included on UP-non-debug builds:) | ||
| 42 | * | ||
| 43 | * linux/spinlock_api_up.h: | ||
| 44 | * builds the _spin_*() APIs. | ||
| 45 | * | ||
| 46 | * linux/spinlock.h: builds the final spin_*() APIs. | ||
| 6 | */ | 47 | */ |
| 7 | 48 | ||
| 8 | #include <linux/config.h> | 49 | #include <linux/config.h> |
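
Editor's note: from a user's point of view the layering described in the new header comment is invisible; only the top-level header is ever included directly. A small consumer sketch (the lock and function names are illustrative):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(my_lock);    /* initializer from spinlock_types.h */

    static void touch_shared_state(void)
    {
        unsigned long flags;

        /* Final API built by spinlock.h; which _spin_*()/__raw_*()
         * layer this lands in depends on the SMP/UP and debug config. */
        spin_lock_irqsave(&my_lock, flags);
        /* ... critical section ... */
        spin_unlock_irqrestore(&my_lock, flags);
    }
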
| @@ -13,7 +54,6 @@ | |||
| 13 | #include <linux/kernel.h> | 54 | #include <linux/kernel.h> |
| 14 | #include <linux/stringify.h> | 55 | #include <linux/stringify.h> |
| 15 | 56 | ||
| 16 | #include <asm/processor.h> /* for cpu relax */ | ||
| 17 | #include <asm/system.h> | 57 | #include <asm/system.h> |
| 18 | 58 | ||
| 19 | /* | 59 | /* |
| @@ -35,423 +75,84 @@ | |||
| 35 | #define __lockfunc fastcall __attribute__((section(".spinlock.text"))) | 75 | #define __lockfunc fastcall __attribute__((section(".spinlock.text"))) |
| 36 | 76 | ||
| 37 | /* | 77 | /* |
| 38 | * If CONFIG_SMP is set, pull in the _raw_* definitions | 78 | * Pull the raw_spinlock_t and raw_rwlock_t definitions: |
| 39 | */ | 79 | */ |
| 40 | #ifdef CONFIG_SMP | 80 | #include <linux/spinlock_types.h> |
| 41 | |||
| 42 | #define assert_spin_locked(x) BUG_ON(!spin_is_locked(x)) | ||
| 43 | #include <asm/spinlock.h> | ||
| 44 | |||
| 45 | int __lockfunc _spin_trylock(spinlock_t *lock); | ||
| 46 | int __lockfunc _read_trylock(rwlock_t *lock); | ||
| 47 | int __lockfunc _write_trylock(rwlock_t *lock); | ||
| 48 | |||
| 49 | void __lockfunc _spin_lock(spinlock_t *lock) __acquires(spinlock_t); | ||
| 50 | void __lockfunc _read_lock(rwlock_t *lock) __acquires(rwlock_t); | ||
| 51 | void __lockfunc _write_lock(rwlock_t *lock) __acquires(rwlock_t); | ||
| 52 | |||
| 53 | void __lockfunc _spin_unlock(spinlock_t *lock) __releases(spinlock_t); | ||
| 54 | void __lockfunc _read_unlock(rwlock_t *lock) __releases(rwlock_t); | ||
| 55 | void __lockfunc _write_unlock(rwlock_t *lock) __releases(rwlock_t); | ||
| 56 | |||
| 57 | unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) __acquires(spinlock_t); | ||
| 58 | unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) __acquires(rwlock_t); | ||
| 59 | unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) __acquires(rwlock_t); | ||
| 60 | |||
| 61 | void __lockfunc _spin_lock_irq(spinlock_t *lock) __acquires(spinlock_t); | ||
| 62 | void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(spinlock_t); | ||
| 63 | void __lockfunc _read_lock_irq(rwlock_t *lock) __acquires(rwlock_t); | ||
| 64 | void __lockfunc _read_lock_bh(rwlock_t *lock) __acquires(rwlock_t); | ||
| 65 | void __lockfunc _write_lock_irq(rwlock_t *lock) __acquires(rwlock_t); | ||
| 66 | void __lockfunc _write_lock_bh(rwlock_t *lock) __acquires(rwlock_t); | ||
| 67 | |||
| 68 | void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) __releases(spinlock_t); | ||
| 69 | void __lockfunc _spin_unlock_irq(spinlock_t *lock) __releases(spinlock_t); | ||
| 70 | void __lockfunc _spin_unlock_bh(spinlock_t *lock) __releases(spinlock_t); | ||
| 71 | void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) __releases(rwlock_t); | ||
| 72 | void __lockfunc _read_unlock_irq(rwlock_t *lock) __releases(rwlock_t); | ||
| 73 | void __lockfunc _read_unlock_bh(rwlock_t *lock) __releases(rwlock_t); | ||
| 74 | void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) __releases(rwlock_t); | ||
| 75 | void __lockfunc _write_unlock_irq(rwlock_t *lock) __releases(rwlock_t); | ||
| 76 | void __lockfunc _write_unlock_bh(rwlock_t *lock) __releases(rwlock_t); | ||
| 77 | |||
| 78 | int __lockfunc _spin_trylock_bh(spinlock_t *lock); | ||
| 79 | int __lockfunc generic_raw_read_trylock(rwlock_t *lock); | ||
| 80 | int in_lock_functions(unsigned long addr); | ||
| 81 | |||
| 82 | #else | ||
| 83 | 81 | ||
| 84 | #define in_lock_functions(ADDR) 0 | 82 | extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock); |
| 85 | 83 | ||
| 86 | #if !defined(CONFIG_PREEMPT) && !defined(CONFIG_DEBUG_SPINLOCK) | ||
| 87 | # define _atomic_dec_and_lock(atomic,lock) atomic_dec_and_test(atomic) | ||
| 88 | # define ATOMIC_DEC_AND_LOCK | ||
| 89 | #endif | ||
| 90 | |||
| 91 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 92 | |||
| 93 | #define SPINLOCK_MAGIC 0x1D244B3C | ||
| 94 | typedef struct { | ||
| 95 | unsigned long magic; | ||
| 96 | volatile unsigned long lock; | ||
| 97 | volatile unsigned int babble; | ||
| 98 | const char *module; | ||
| 99 | char *owner; | ||
| 100 | int oline; | ||
| 101 | } spinlock_t; | ||
| 102 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { SPINLOCK_MAGIC, 0, 10, __FILE__ , NULL, 0} | ||
| 103 | |||
| 104 | #define spin_lock_init(x) \ | ||
| 105 | do { \ | ||
| 106 | (x)->magic = SPINLOCK_MAGIC; \ | ||
| 107 | (x)->lock = 0; \ | ||
| 108 | (x)->babble = 5; \ | ||
| 109 | (x)->module = __FILE__; \ | ||
| 110 | (x)->owner = NULL; \ | ||
| 111 | (x)->oline = 0; \ | ||
| 112 | } while (0) | ||
| 113 | |||
| 114 | #define CHECK_LOCK(x) \ | ||
| 115 | do { \ | ||
| 116 | if ((x)->magic != SPINLOCK_MAGIC) { \ | ||
| 117 | printk(KERN_ERR "%s:%d: spin_is_locked on uninitialized spinlock %p.\n", \ | ||
| 118 | __FILE__, __LINE__, (x)); \ | ||
| 119 | } \ | ||
| 120 | } while(0) | ||
| 121 | |||
| 122 | #define _raw_spin_lock(x) \ | ||
| 123 | do { \ | ||
| 124 | CHECK_LOCK(x); \ | ||
| 125 | if ((x)->lock&&(x)->babble) { \ | ||
| 126 | (x)->babble--; \ | ||
| 127 | printk("%s:%d: spin_lock(%s:%p) already locked by %s/%d\n", \ | ||
| 128 | __FILE__,__LINE__, (x)->module, \ | ||
| 129 | (x), (x)->owner, (x)->oline); \ | ||
| 130 | } \ | ||
| 131 | (x)->lock = 1; \ | ||
| 132 | (x)->owner = __FILE__; \ | ||
| 133 | (x)->oline = __LINE__; \ | ||
| 134 | } while (0) | ||
| 135 | |||
| 136 | /* without debugging, spin_is_locked on UP always says | ||
| 137 | * FALSE. --> printk if already locked. */ | ||
| 138 | #define spin_is_locked(x) \ | ||
| 139 | ({ \ | ||
| 140 | CHECK_LOCK(x); \ | ||
| 141 | if ((x)->lock&&(x)->babble) { \ | ||
| 142 | (x)->babble--; \ | ||
| 143 | printk("%s:%d: spin_is_locked(%s:%p) already locked by %s/%d\n", \ | ||
| 144 | __FILE__,__LINE__, (x)->module, \ | ||
| 145 | (x), (x)->owner, (x)->oline); \ | ||
| 146 | } \ | ||
| 147 | 0; \ | ||
| 148 | }) | ||
| 149 | |||
| 150 | /* with debugging, assert_spin_locked() on UP does check | ||
| 151 | * the lock value properly */ | ||
| 152 | #define assert_spin_locked(x) \ | ||
| 153 | ({ \ | ||
| 154 | CHECK_LOCK(x); \ | ||
| 155 | BUG_ON(!(x)->lock); \ | ||
| 156 | }) | ||
| 157 | |||
| 158 | /* without debugging, spin_trylock on UP always says | ||
| 159 | * TRUE. --> printk if already locked. */ | ||
| 160 | #define _raw_spin_trylock(x) \ | ||
| 161 | ({ \ | ||
| 162 | CHECK_LOCK(x); \ | ||
| 163 | if ((x)->lock&&(x)->babble) { \ | ||
| 164 | (x)->babble--; \ | ||
| 165 | printk("%s:%d: spin_trylock(%s:%p) already locked by %s/%d\n", \ | ||
| 166 | __FILE__,__LINE__, (x)->module, \ | ||
| 167 | (x), (x)->owner, (x)->oline); \ | ||
| 168 | } \ | ||
| 169 | (x)->lock = 1; \ | ||
| 170 | (x)->owner = __FILE__; \ | ||
| 171 | (x)->oline = __LINE__; \ | ||
| 172 | 1; \ | ||
| 173 | }) | ||
| 174 | |||
| 175 | #define spin_unlock_wait(x) \ | ||
| 176 | do { \ | ||
| 177 | CHECK_LOCK(x); \ | ||
| 178 | if ((x)->lock&&(x)->babble) { \ | ||
| 179 | (x)->babble--; \ | ||
| 180 | printk("%s:%d: spin_unlock_wait(%s:%p) owned by %s/%d\n", \ | ||
| 181 | __FILE__,__LINE__, (x)->module, (x), \ | ||
| 182 | (x)->owner, (x)->oline); \ | ||
| 183 | }\ | ||
| 184 | } while (0) | ||
| 185 | |||
| 186 | #define _raw_spin_unlock(x) \ | ||
| 187 | do { \ | ||
| 188 | CHECK_LOCK(x); \ | ||
| 189 | if (!(x)->lock&&(x)->babble) { \ | ||
| 190 | (x)->babble--; \ | ||
| 191 | printk("%s:%d: spin_unlock(%s:%p) not locked\n", \ | ||
| 192 | __FILE__,__LINE__, (x)->module, (x));\ | ||
| 193 | } \ | ||
| 194 | (x)->lock = 0; \ | ||
| 195 | } while (0) | ||
| 196 | #else | ||
| 197 | /* | 84 | /* |
| 198 | * gcc versions before ~2.95 have a nasty bug with empty initializers. | 85 | * Pull the __raw*() functions/declarations (UP-nondebug doesn't need them): |
| 199 | */ | 86 | */ |
| 200 | #if (__GNUC__ > 2) | 87 | #if defined(CONFIG_SMP) |
| 201 | typedef struct { } spinlock_t; | 88 | # include <asm/spinlock.h> |
| 202 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { } | ||
| 203 | #else | 89 | #else |
| 204 | typedef struct { int gcc_is_buggy; } spinlock_t; | 90 | # include <linux/spinlock_up.h> |
| 205 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } | ||
| 206 | #endif | 91 | #endif |
| 207 | 92 | ||
| 93 | #define spin_lock_init(lock) do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0) | ||
| 94 | #define rwlock_init(lock) do { *(lock) = RW_LOCK_UNLOCKED; } while (0) | ||
| 95 | |||
| 96 | #define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock) | ||
| 97 | |||
| 98 | /** | ||
| 99 | * spin_unlock_wait - wait until the spinlock gets unlocked | ||
| 100 | * @lock: the spinlock in question. | ||
| 101 | */ | ||
| 102 | #define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock) | ||
| 103 | |||
| 208 | /* | 104 | /* |
| 209 | * If CONFIG_SMP is unset, declare the _raw_* definitions as nops | 105 | * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: |
| 210 | */ | 106 | */ |
| 211 | #define spin_lock_init(lock) do { (void)(lock); } while(0) | 107 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) |
| 212 | #define _raw_spin_lock(lock) do { (void)(lock); } while(0) | 108 | # include <linux/spinlock_api_smp.h> |
| 213 | #define spin_is_locked(lock) ((void)(lock), 0) | ||
| 214 | #define assert_spin_locked(lock) do { (void)(lock); } while(0) | ||
| 215 | #define _raw_spin_trylock(lock) (((void)(lock), 1)) | ||
| 216 | #define spin_unlock_wait(lock) (void)(lock) | ||
| 217 | #define _raw_spin_unlock(lock) do { (void)(lock); } while(0) | ||
| 218 | #endif /* CONFIG_DEBUG_SPINLOCK */ | ||
| 219 | |||
| 220 | /* RW spinlocks: No debug version */ | ||
| 221 | |||
| 222 | #if (__GNUC__ > 2) | ||
| 223 | typedef struct { } rwlock_t; | ||
| 224 | #define RW_LOCK_UNLOCKED (rwlock_t) { } | ||
| 225 | #else | 109 | #else |
| 226 | typedef struct { int gcc_is_buggy; } rwlock_t; | 110 | # include <linux/spinlock_api_up.h> |
| 227 | #define RW_LOCK_UNLOCKED (rwlock_t) { 0 } | ||
| 228 | #endif | 111 | #endif |
| 229 | 112 | ||
| 230 | #define rwlock_init(lock) do { (void)(lock); } while(0) | 113 | #ifdef CONFIG_DEBUG_SPINLOCK |
| 231 | #define _raw_read_lock(lock) do { (void)(lock); } while(0) | 114 | extern void _raw_spin_lock(spinlock_t *lock); |
| 232 | #define _raw_read_unlock(lock) do { (void)(lock); } while(0) | 115 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) |
| 233 | #define _raw_write_lock(lock) do { (void)(lock); } while(0) | 116 | extern int _raw_spin_trylock(spinlock_t *lock); |
| 234 | #define _raw_write_unlock(lock) do { (void)(lock); } while(0) | 117 | extern void _raw_spin_unlock(spinlock_t *lock); |
| 235 | #define read_can_lock(lock) (((void)(lock), 1)) | 118 | |
| 236 | #define write_can_lock(lock) (((void)(lock), 1)) | 119 | extern void _raw_read_lock(rwlock_t *lock); |
| 237 | #define _raw_read_trylock(lock) ({ (void)(lock); (1); }) | 120 | extern int _raw_read_trylock(rwlock_t *lock); |
| 238 | #define _raw_write_trylock(lock) ({ (void)(lock); (1); }) | 121 | extern void _raw_read_unlock(rwlock_t *lock); |
| 239 | 122 | extern void _raw_write_lock(rwlock_t *lock); | |
| 240 | #define _spin_trylock(lock) ({preempt_disable(); _raw_spin_trylock(lock) ? \ | 123 | extern int _raw_write_trylock(rwlock_t *lock); |
| 241 | 1 : ({preempt_enable(); 0;});}) | 124 | extern void _raw_write_unlock(rwlock_t *lock); |
| 242 | 125 | #else | |
| 243 | #define _read_trylock(lock) ({preempt_disable();_raw_read_trylock(lock) ? \ | 126 | # define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock) |
| 244 | 1 : ({preempt_enable(); 0;});}) | 127 | # define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock) |
| 245 | 128 | # define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock) | |
| 246 | #define _write_trylock(lock) ({preempt_disable(); _raw_write_trylock(lock) ? \ | 129 | # define _raw_spin_lock_flags(lock, flags) \ |
| 247 | 1 : ({preempt_enable(); 0;});}) | 130 | __raw_spin_lock_flags(&(lock)->raw_lock, *(flags)) |
| 248 | 131 | # define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock) | |
| 249 | #define _spin_trylock_bh(lock) ({preempt_disable(); local_bh_disable(); \ | 132 | # define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock) |
| 250 | _raw_spin_trylock(lock) ? \ | 133 | # define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock) |
| 251 | 1 : ({preempt_enable_no_resched(); local_bh_enable(); 0;});}) | 134 | # define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock) |
| 252 | 135 | # define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock) | |
| 253 | #define _spin_lock(lock) \ | 136 | # define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock) |
| 254 | do { \ | 137 | #endif |
| 255 | preempt_disable(); \ | ||
| 256 | _raw_spin_lock(lock); \ | ||
| 257 | __acquire(lock); \ | ||
| 258 | } while(0) | ||
| 259 | |||
| 260 | #define _write_lock(lock) \ | ||
| 261 | do { \ | ||
| 262 | preempt_disable(); \ | ||
| 263 | _raw_write_lock(lock); \ | ||
| 264 | __acquire(lock); \ | ||
| 265 | } while(0) | ||
| 266 | |||
| 267 | #define _read_lock(lock) \ | ||
| 268 | do { \ | ||
| 269 | preempt_disable(); \ | ||
| 270 | _raw_read_lock(lock); \ | ||
| 271 | __acquire(lock); \ | ||
| 272 | } while(0) | ||
| 273 | |||
| 274 | #define _spin_unlock(lock) \ | ||
| 275 | do { \ | ||
| 276 | _raw_spin_unlock(lock); \ | ||
| 277 | preempt_enable(); \ | ||
| 278 | __release(lock); \ | ||
| 279 | } while (0) | ||
| 280 | |||
| 281 | #define _write_unlock(lock) \ | ||
| 282 | do { \ | ||
| 283 | _raw_write_unlock(lock); \ | ||
| 284 | preempt_enable(); \ | ||
| 285 | __release(lock); \ | ||
| 286 | } while(0) | ||
| 287 | |||
| 288 | #define _read_unlock(lock) \ | ||
| 289 | do { \ | ||
| 290 | _raw_read_unlock(lock); \ | ||
| 291 | preempt_enable(); \ | ||
| 292 | __release(lock); \ | ||
| 293 | } while(0) | ||
| 294 | |||
| 295 | #define _spin_lock_irqsave(lock, flags) \ | ||
| 296 | do { \ | ||
| 297 | local_irq_save(flags); \ | ||
| 298 | preempt_disable(); \ | ||
| 299 | _raw_spin_lock(lock); \ | ||
| 300 | __acquire(lock); \ | ||
| 301 | } while (0) | ||
| 302 | |||
| 303 | #define _spin_lock_irq(lock) \ | ||
| 304 | do { \ | ||
| 305 | local_irq_disable(); \ | ||
| 306 | preempt_disable(); \ | ||
| 307 | _raw_spin_lock(lock); \ | ||
| 308 | __acquire(lock); \ | ||
| 309 | } while (0) | ||
| 310 | |||
| 311 | #define _spin_lock_bh(lock) \ | ||
| 312 | do { \ | ||
| 313 | local_bh_disable(); \ | ||
| 314 | preempt_disable(); \ | ||
| 315 | _raw_spin_lock(lock); \ | ||
| 316 | __acquire(lock); \ | ||
| 317 | } while (0) | ||
| 318 | |||
| 319 | #define _read_lock_irqsave(lock, flags) \ | ||
| 320 | do { \ | ||
| 321 | local_irq_save(flags); \ | ||
| 322 | preempt_disable(); \ | ||
| 323 | _raw_read_lock(lock); \ | ||
| 324 | __acquire(lock); \ | ||
| 325 | } while (0) | ||
| 326 | |||
| 327 | #define _read_lock_irq(lock) \ | ||
| 328 | do { \ | ||
| 329 | local_irq_disable(); \ | ||
| 330 | preempt_disable(); \ | ||
| 331 | _raw_read_lock(lock); \ | ||
| 332 | __acquire(lock); \ | ||
| 333 | } while (0) | ||
| 334 | |||
| 335 | #define _read_lock_bh(lock) \ | ||
| 336 | do { \ | ||
| 337 | local_bh_disable(); \ | ||
| 338 | preempt_disable(); \ | ||
| 339 | _raw_read_lock(lock); \ | ||
| 340 | __acquire(lock); \ | ||
| 341 | } while (0) | ||
| 342 | |||
| 343 | #define _write_lock_irqsave(lock, flags) \ | ||
| 344 | do { \ | ||
| 345 | local_irq_save(flags); \ | ||
| 346 | preempt_disable(); \ | ||
| 347 | _raw_write_lock(lock); \ | ||
| 348 | __acquire(lock); \ | ||
| 349 | } while (0) | ||
| 350 | 138 | ||
| 351 | #define _write_lock_irq(lock) \ | 139 | #define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock) |
| 352 | do { \ | 140 | #define write_can_lock(rwlock) __raw_write_can_lock(&(rwlock)->raw_lock) |
| 353 | local_irq_disable(); \ | ||
| 354 | preempt_disable(); \ | ||
| 355 | _raw_write_lock(lock); \ | ||
| 356 | __acquire(lock); \ | ||
| 357 | } while (0) | ||
| 358 | |||
| 359 | #define _write_lock_bh(lock) \ | ||
| 360 | do { \ | ||
| 361 | local_bh_disable(); \ | ||
| 362 | preempt_disable(); \ | ||
| 363 | _raw_write_lock(lock); \ | ||
| 364 | __acquire(lock); \ | ||
| 365 | } while (0) | ||
| 366 | |||
| 367 | #define _spin_unlock_irqrestore(lock, flags) \ | ||
| 368 | do { \ | ||
| 369 | _raw_spin_unlock(lock); \ | ||
| 370 | local_irq_restore(flags); \ | ||
| 371 | preempt_enable(); \ | ||
| 372 | __release(lock); \ | ||
| 373 | } while (0) | ||
| 374 | |||
| 375 | #define _spin_unlock_irq(lock) \ | ||
| 376 | do { \ | ||
| 377 | _raw_spin_unlock(lock); \ | ||
| 378 | local_irq_enable(); \ | ||
| 379 | preempt_enable(); \ | ||
| 380 | __release(lock); \ | ||
| 381 | } while (0) | ||
| 382 | |||
| 383 | #define _spin_unlock_bh(lock) \ | ||
| 384 | do { \ | ||
| 385 | _raw_spin_unlock(lock); \ | ||
| 386 | preempt_enable_no_resched(); \ | ||
| 387 | local_bh_enable(); \ | ||
| 388 | __release(lock); \ | ||
| 389 | } while (0) | ||
| 390 | |||
| 391 | #define _write_unlock_bh(lock) \ | ||
| 392 | do { \ | ||
| 393 | _raw_write_unlock(lock); \ | ||
| 394 | preempt_enable_no_resched(); \ | ||
| 395 | local_bh_enable(); \ | ||
| 396 | __release(lock); \ | ||
| 397 | } while (0) | ||
| 398 | |||
| 399 | #define _read_unlock_irqrestore(lock, flags) \ | ||
| 400 | do { \ | ||
| 401 | _raw_read_unlock(lock); \ | ||
| 402 | local_irq_restore(flags); \ | ||
| 403 | preempt_enable(); \ | ||
| 404 | __release(lock); \ | ||
| 405 | } while (0) | ||
| 406 | |||
| 407 | #define _write_unlock_irqrestore(lock, flags) \ | ||
| 408 | do { \ | ||
| 409 | _raw_write_unlock(lock); \ | ||
| 410 | local_irq_restore(flags); \ | ||
| 411 | preempt_enable(); \ | ||
| 412 | __release(lock); \ | ||
| 413 | } while (0) | ||
| 414 | |||
| 415 | #define _read_unlock_irq(lock) \ | ||
| 416 | do { \ | ||
| 417 | _raw_read_unlock(lock); \ | ||
| 418 | local_irq_enable(); \ | ||
| 419 | preempt_enable(); \ | ||
| 420 | __release(lock); \ | ||
| 421 | } while (0) | ||
| 422 | |||
| 423 | #define _read_unlock_bh(lock) \ | ||
| 424 | do { \ | ||
| 425 | _raw_read_unlock(lock); \ | ||
| 426 | preempt_enable_no_resched(); \ | ||
| 427 | local_bh_enable(); \ | ||
| 428 | __release(lock); \ | ||
| 429 | } while (0) | ||
| 430 | |||
| 431 | #define _write_unlock_irq(lock) \ | ||
| 432 | do { \ | ||
| 433 | _raw_write_unlock(lock); \ | ||
| 434 | local_irq_enable(); \ | ||
| 435 | preempt_enable(); \ | ||
| 436 | __release(lock); \ | ||
| 437 | } while (0) | ||
| 438 | |||
| 439 | #endif /* !SMP */ | ||
| 440 | 141 | ||
| 441 | /* | 142 | /* |
| 442 | * Define the various spin_lock and rw_lock methods. Note we define these | 143 | * Define the various spin_lock and rw_lock methods. Note we define these |
| 443 | * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various | 144 | * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various |
| 444 | * methods are defined as nops in the case they are not required. | 145 | * methods are defined as nops in the case they are not required. |
| 445 | */ | 146 | */ |
| 446 | #define spin_trylock(lock) __cond_lock(_spin_trylock(lock)) | 147 | #define spin_trylock(lock) __cond_lock(_spin_trylock(lock)) |
| 447 | #define read_trylock(lock) __cond_lock(_read_trylock(lock)) | 148 | #define read_trylock(lock) __cond_lock(_read_trylock(lock)) |
| 448 | #define write_trylock(lock) __cond_lock(_write_trylock(lock)) | 149 | #define write_trylock(lock) __cond_lock(_write_trylock(lock)) |
| 449 | 150 | ||
| 450 | #define spin_lock(lock) _spin_lock(lock) | 151 | #define spin_lock(lock) _spin_lock(lock) |
| 451 | #define write_lock(lock) _write_lock(lock) | 152 | #define write_lock(lock) _write_lock(lock) |
| 452 | #define read_lock(lock) _read_lock(lock) | 153 | #define read_lock(lock) _read_lock(lock) |
| 453 | 154 | ||
| 454 | #ifdef CONFIG_SMP | 155 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) |
| 455 | #define spin_lock_irqsave(lock, flags) flags = _spin_lock_irqsave(lock) | 156 | #define spin_lock_irqsave(lock, flags) flags = _spin_lock_irqsave(lock) |
| 456 | #define read_lock_irqsave(lock, flags) flags = _read_lock_irqsave(lock) | 157 | #define read_lock_irqsave(lock, flags) flags = _read_lock_irqsave(lock) |
| 457 | #define write_lock_irqsave(lock, flags) flags = _write_lock_irqsave(lock) | 158 | #define write_lock_irqsave(lock, flags) flags = _write_lock_irqsave(lock) |
| @@ -470,137 +171,59 @@ do { \ | |||
| 470 | #define write_lock_irq(lock) _write_lock_irq(lock) | 171 | #define write_lock_irq(lock) _write_lock_irq(lock) |
| 471 | #define write_lock_bh(lock) _write_lock_bh(lock) | 172 | #define write_lock_bh(lock) _write_lock_bh(lock) |
| 472 | 173 | ||
| 473 | #define spin_unlock(lock) _spin_unlock(lock) | 174 | #define spin_unlock(lock) _spin_unlock(lock) |
| 474 | #define write_unlock(lock) _write_unlock(lock) | 175 | #define write_unlock(lock) _write_unlock(lock) |
| 475 | #define read_unlock(lock) _read_unlock(lock) | 176 | #define read_unlock(lock) _read_unlock(lock) |
| 476 | 177 | ||
| 477 | #define spin_unlock_irqrestore(lock, flags) _spin_unlock_irqrestore(lock, flags) | 178 | #define spin_unlock_irqrestore(lock, flags) \ |
| 179 | _spin_unlock_irqrestore(lock, flags) | ||
| 478 | #define spin_unlock_irq(lock) _spin_unlock_irq(lock) | 180 | #define spin_unlock_irq(lock) _spin_unlock_irq(lock) |
| 479 | #define spin_unlock_bh(lock) _spin_unlock_bh(lock) | 181 | #define spin_unlock_bh(lock) _spin_unlock_bh(lock) |
| 480 | 182 | ||
| 481 | #define read_unlock_irqrestore(lock, flags) _read_unlock_irqrestore(lock, flags) | 183 | #define read_unlock_irqrestore(lock, flags) \ |
| 482 | #define read_unlock_irq(lock) _read_unlock_irq(lock) | 184 | _read_unlock_irqrestore(lock, flags) |
| 483 | #define read_unlock_bh(lock) _read_unlock_bh(lock) | 185 | #define read_unlock_irq(lock) _read_unlock_irq(lock) |
| 186 | #define read_unlock_bh(lock) _read_unlock_bh(lock) | ||
| 484 | 187 | ||
| 485 | #define write_unlock_irqrestore(lock, flags) _write_unlock_irqrestore(lock, flags) | 188 | #define write_unlock_irqrestore(lock, flags) \ |
| 486 | #define write_unlock_irq(lock) _write_unlock_irq(lock) | 189 | _write_unlock_irqrestore(lock, flags) |
| 487 | #define write_unlock_bh(lock) _write_unlock_bh(lock) | 190 | #define write_unlock_irq(lock) _write_unlock_irq(lock) |
| 191 | #define write_unlock_bh(lock) _write_unlock_bh(lock) | ||
| 488 | 192 | ||
| 489 | #define spin_trylock_bh(lock) __cond_lock(_spin_trylock_bh(lock)) | 193 | #define spin_trylock_bh(lock) __cond_lock(_spin_trylock_bh(lock)) |
| 490 | 194 | ||
| 491 | #define spin_trylock_irq(lock) \ | 195 | #define spin_trylock_irq(lock) \ |
| 492 | ({ \ | 196 | ({ \ |
| 493 | local_irq_disable(); \ | 197 | local_irq_disable(); \ |
| 494 | _spin_trylock(lock) ? \ | 198 | _spin_trylock(lock) ? \ |
| 495 | 1 : ({local_irq_enable(); 0; }); \ | 199 | 1 : ({ local_irq_enable(); 0; }); \ |
| 496 | }) | 200 | }) |
| 497 | 201 | ||
| 498 | #define spin_trylock_irqsave(lock, flags) \ | 202 | #define spin_trylock_irqsave(lock, flags) \ |
| 499 | ({ \ | 203 | ({ \ |
| 500 | local_irq_save(flags); \ | 204 | local_irq_save(flags); \ |
| 501 | _spin_trylock(lock) ? \ | 205 | _spin_trylock(lock) ? \ |
| 502 | 1 : ({local_irq_restore(flags); 0;}); \ | 206 | 1 : ({ local_irq_restore(flags); 0; }); \ |
| 503 | }) | 207 | }) |
| 504 | 208 | ||
| 505 | #ifdef CONFIG_LOCKMETER | ||
| 506 | extern void _metered_spin_lock (spinlock_t *lock); | ||
| 507 | extern void _metered_spin_unlock (spinlock_t *lock); | ||
| 508 | extern int _metered_spin_trylock(spinlock_t *lock); | ||
| 509 | extern void _metered_read_lock (rwlock_t *lock); | ||
| 510 | extern void _metered_read_unlock (rwlock_t *lock); | ||
| 511 | extern void _metered_write_lock (rwlock_t *lock); | ||
| 512 | extern void _metered_write_unlock (rwlock_t *lock); | ||
| 513 | extern int _metered_read_trylock (rwlock_t *lock); | ||
| 514 | extern int _metered_write_trylock(rwlock_t *lock); | ||
| 515 | #endif | ||
| 516 | |||
| 517 | /* "lock on reference count zero" */ | ||
| 518 | #ifndef ATOMIC_DEC_AND_LOCK | ||
| 519 | #include <asm/atomic.h> | ||
| 520 | extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock); | ||
| 521 | #endif | ||
| 522 | |||
| 523 | #define atomic_dec_and_lock(atomic,lock) __cond_lock(_atomic_dec_and_lock(atomic,lock)) | ||
| 524 | |||
| 525 | /* | ||
| 526 | * bit-based spin_lock() | ||
| 527 | * | ||
| 528 | * Don't use this unless you really need to: spin_lock() and spin_unlock() | ||
| 529 | * are significantly faster. | ||
| 530 | */ | ||
| 531 | static inline void bit_spin_lock(int bitnum, unsigned long *addr) | ||
| 532 | { | ||
| 533 | /* | ||
| 534 | * Assuming the lock is uncontended, this never enters | ||
| 535 | * the body of the outer loop. If it is contended, then | ||
| 536 | * within the inner loop a non-atomic test is used to | ||
| 537 | * busywait with less bus contention for a good time to | ||
| 538 | * attempt to acquire the lock bit. | ||
| 539 | */ | ||
| 540 | preempt_disable(); | ||
| 541 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | ||
| 542 | while (test_and_set_bit(bitnum, addr)) { | ||
| 543 | while (test_bit(bitnum, addr)) { | ||
| 544 | preempt_enable(); | ||
| 545 | cpu_relax(); | ||
| 546 | preempt_disable(); | ||
| 547 | } | ||
| 548 | } | ||
| 549 | #endif | ||
| 550 | __acquire(bitlock); | ||
| 551 | } | ||
| 552 | |||
| 553 | /* | 209 | /* |
| 554 | * Return true if it was acquired | 210 | * Pull the atomic_t declaration: |
| 211 | * (asm-mips/atomic.h needs above definitions) | ||
| 555 | */ | 212 | */ |
| 556 | static inline int bit_spin_trylock(int bitnum, unsigned long *addr) | 213 | #include <asm/atomic.h> |
| 557 | { | 214 | /** |
| 558 | preempt_disable(); | 215 | * atomic_dec_and_lock - lock on reaching reference count zero |
| 559 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | 216 | * @atomic: the atomic counter |
| 560 | if (test_and_set_bit(bitnum, addr)) { | 217 | * @lock: the spinlock in question |
| 561 | preempt_enable(); | ||
| 562 | return 0; | ||
| 563 | } | ||
| 564 | #endif | ||
| 565 | __acquire(bitlock); | ||
| 566 | return 1; | ||
| 567 | } | ||
| 568 | |||
| 569 | /* | ||
| 570 | * bit-based spin_unlock() | ||
| 571 | */ | ||
| 572 | static inline void bit_spin_unlock(int bitnum, unsigned long *addr) | ||
| 573 | { | ||
| 574 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | ||
| 575 | BUG_ON(!test_bit(bitnum, addr)); | ||
| 576 | smp_mb__before_clear_bit(); | ||
| 577 | clear_bit(bitnum, addr); | ||
| 578 | #endif | ||
| 579 | preempt_enable(); | ||
| 580 | __release(bitlock); | ||
| 581 | } | ||
| 582 | |||
| 583 | /* | ||
| 584 | * Return true if the lock is held. | ||
| 585 | */ | 218 | */ |
| 586 | static inline int bit_spin_is_locked(int bitnum, unsigned long *addr) | 219 | extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock); |
| 587 | { | 220 | #define atomic_dec_and_lock(atomic, lock) \ |
| 588 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | 221 | __cond_lock(_atomic_dec_and_lock(atomic, lock)) |
| 589 | return test_bit(bitnum, addr); | ||
| 590 | #elif defined CONFIG_PREEMPT | ||
| 591 | return preempt_count(); | ||
| 592 | #else | ||
| 593 | return 1; | ||
| 594 | #endif | ||
| 595 | } | ||
| 596 | |||
| 597 | #define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED | ||
| 598 | #define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED | ||
| 599 | 222 | ||
| 600 | /** | 223 | /** |
| 601 | * spin_can_lock - would spin_trylock() succeed? | 224 | * spin_can_lock - would spin_trylock() succeed? |
| 602 | * @lock: the spinlock in question. | 225 | * @lock: the spinlock in question. |
| 603 | */ | 226 | */ |
| 604 | #define spin_can_lock(lock) (!spin_is_locked(lock)) | 227 | #define spin_can_lock(lock) (!spin_is_locked(lock)) |
| 605 | 228 | ||
| 606 | #endif /* __LINUX_SPINLOCK_H */ | 229 | #endif /* __LINUX_SPINLOCK_H */ |
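
Editor's note: the atomic_dec_and_lock() kernel-doc added above describes the classic refcounting pattern — take the lock only when the count reaches zero, keeping the common put lock-free. A hedged usage sketch with hypothetical structure and function names:

    struct counted {
        atomic_t refcount;
        /* ... payload ... */
    };

    static DEFINE_SPINLOCK(table_lock);    /* protects the lookup structure */

    static void counted_put(struct counted *c)
    {
        /* Returns nonzero -- with table_lock held -- only when this was
         * the final reference; every other put never touches the lock. */
        if (atomic_dec_and_lock(&c->refcount, &table_lock)) {
            /* ... unlink c from the table ... */
            spin_unlock(&table_lock);
            kfree(c);
        }
    }
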
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h new file mode 100644 index 000000000000..78e6989ffb54 --- /dev/null +++ b/include/linux/spinlock_api_smp.h | |||
| @@ -0,0 +1,57 @@ | |||
| 1 | #ifndef __LINUX_SPINLOCK_API_SMP_H | ||
| 2 | #define __LINUX_SPINLOCK_API_SMP_H | ||
| 3 | |||
| 4 | #ifndef __LINUX_SPINLOCK_H | ||
| 5 | # error "please don't include this file directly" | ||
| 6 | #endif | ||
| 7 | |||
| 8 | /* | ||
| 9 | * include/linux/spinlock_api_smp.h | ||
| 10 | * | ||
| 11 | * spinlock API declarations on SMP (and debug) | ||
| 12 | * (implemented in kernel/spinlock.c) | ||
| 13 | * | ||
| 14 | * portions Copyright 2005, Red Hat, Inc., Ingo Molnar | ||
| 15 | * Released under the General Public License (GPL). | ||
| 16 | */ | ||
| 17 | |||
| 18 | int in_lock_functions(unsigned long addr); | ||
| 19 | |||
| 20 | #define assert_spin_locked(x) BUG_ON(!spin_is_locked(x)) | ||
| 21 | |||
| 22 | void __lockfunc _spin_lock(spinlock_t *lock) __acquires(spinlock_t); | ||
| 23 | void __lockfunc _read_lock(rwlock_t *lock) __acquires(rwlock_t); | ||
| 24 | void __lockfunc _write_lock(rwlock_t *lock) __acquires(rwlock_t); | ||
| 25 | void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(spinlock_t); | ||
| 26 | void __lockfunc _read_lock_bh(rwlock_t *lock) __acquires(rwlock_t); | ||
| 27 | void __lockfunc _write_lock_bh(rwlock_t *lock) __acquires(rwlock_t); | ||
| 28 | void __lockfunc _spin_lock_irq(spinlock_t *lock) __acquires(spinlock_t); | ||
| 29 | void __lockfunc _read_lock_irq(rwlock_t *lock) __acquires(rwlock_t); | ||
| 30 | void __lockfunc _write_lock_irq(rwlock_t *lock) __acquires(rwlock_t); | ||
| 31 | unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) | ||
| 32 | __acquires(spinlock_t); | ||
| 33 | unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) | ||
| 34 | __acquires(rwlock_t); | ||
| 35 | unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) | ||
| 36 | __acquires(rwlock_t); | ||
| 37 | int __lockfunc _spin_trylock(spinlock_t *lock); | ||
| 38 | int __lockfunc _read_trylock(rwlock_t *lock); | ||
| 39 | int __lockfunc _write_trylock(rwlock_t *lock); | ||
| 40 | int __lockfunc _spin_trylock_bh(spinlock_t *lock); | ||
| 41 | void __lockfunc _spin_unlock(spinlock_t *lock) __releases(spinlock_t); | ||
| 42 | void __lockfunc _read_unlock(rwlock_t *lock) __releases(rwlock_t); | ||
| 43 | void __lockfunc _write_unlock(rwlock_t *lock) __releases(rwlock_t); | ||
| 44 | void __lockfunc _spin_unlock_bh(spinlock_t *lock) __releases(spinlock_t); | ||
| 45 | void __lockfunc _read_unlock_bh(rwlock_t *lock) __releases(rwlock_t); | ||
| 46 | void __lockfunc _write_unlock_bh(rwlock_t *lock) __releases(rwlock_t); | ||
| 47 | void __lockfunc _spin_unlock_irq(spinlock_t *lock) __releases(spinlock_t); | ||
| 48 | void __lockfunc _read_unlock_irq(rwlock_t *lock) __releases(rwlock_t); | ||
| 49 | void __lockfunc _write_unlock_irq(rwlock_t *lock) __releases(rwlock_t); | ||
| 50 | void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) | ||
| 51 | __releases(spinlock_t); | ||
| 52 | void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | ||
| 53 | __releases(rwlock_t); | ||
| 54 | void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | ||
| 55 | __releases(rwlock_t); | ||
| 56 | |||
| 57 | #endif /* __LINUX_SPINLOCK_API_SMP_H */ | ||
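
Editor's note: one detail worth calling out in the declarations above is that the irqsave variants return the flags word rather than taking an output parameter, which is why spinlock.h wraps them as "flags = _spin_lock_irqsave(lock)". One plausible out-of-line shape (the real body lives in kernel/spinlock.c, not in this diff, and the preempt-friendly variants differ, so this is only a sketch):

    unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
    {
        unsigned long flags;

        local_irq_save(flags);      /* disable and record local IRQ state */
        preempt_disable();
        _raw_spin_lock(lock);
        return flags;               /* handed back to the caller's macro */
    }
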
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h new file mode 100644 index 000000000000..cd81cee566f4 --- /dev/null +++ b/include/linux/spinlock_api_up.h | |||
| @@ -0,0 +1,80 @@ | |||
| 1 | #ifndef __LINUX_SPINLOCK_API_UP_H | ||
| 2 | #define __LINUX_SPINLOCK_API_UP_H | ||
| 3 | |||
| 4 | #ifndef __LINUX_SPINLOCK_H | ||
| 5 | # error "please don't include this file directly" | ||
| 6 | #endif | ||
| 7 | |||
| 8 | /* | ||
| 9 | * include/linux/spinlock_api_up.h | ||
| 10 | * | ||
| 11 | * spinlock API implementation on UP-nondebug (inlined implementation) | ||
| 12 | * | ||
| 13 | * portions Copyright 2005, Red Hat, Inc., Ingo Molnar | ||
| 14 | * Released under the General Public License (GPL). | ||
| 15 | */ | ||
| 16 | |||
| 17 | #define in_lock_functions(ADDR) 0 | ||
| 18 | |||
| 19 | #define assert_spin_locked(lock) do { (void)(lock); } while (0) | ||
| 20 | |||
| 21 | /* | ||
| 22 | * In the UP-nondebug case there's no real locking going on, so the | ||
| 23 | * only thing we have to do is to keep the preempt counts and irq | ||
| 24 | * flags straight, to suppress compiler warnings of unused lock | ||
| 25 | * variables, and to add the proper checker annotations: | ||
| 26 | */ | ||
| 27 | #define __LOCK(lock) \ | ||
| 28 | do { preempt_disable(); __acquire(lock); (void)(lock); } while (0) | ||
| 29 | |||
| 30 | #define __LOCK_BH(lock) \ | ||
| 31 | do { local_bh_disable(); __LOCK(lock); } while (0) | ||
| 32 | |||
| 33 | #define __LOCK_IRQ(lock) \ | ||
| 34 | do { local_irq_disable(); __LOCK(lock); } while (0) | ||
| 35 | |||
| 36 | #define __LOCK_IRQSAVE(lock, flags) \ | ||
| 37 | do { local_irq_save(flags); __LOCK(lock); } while (0) | ||
| 38 | |||
| 39 | #define __UNLOCK(lock) \ | ||
| 40 | do { preempt_enable(); __release(lock); (void)(lock); } while (0) | ||
| 41 | |||
| 42 | #define __UNLOCK_BH(lock) \ | ||
| 43 | do { preempt_enable_no_resched(); local_bh_enable(); __release(lock); (void)(lock); } while (0) | ||
| 44 | |||
| 45 | #define __UNLOCK_IRQ(lock) \ | ||
| 46 | do { local_irq_enable(); __UNLOCK(lock); } while (0) | ||
| 47 | |||
| 48 | #define __UNLOCK_IRQRESTORE(lock, flags) \ | ||
| 49 | do { local_irq_restore(flags); __UNLOCK(lock); } while (0) | ||
| 50 | |||
| 51 | #define _spin_lock(lock) __LOCK(lock) | ||
| 52 | #define _read_lock(lock) __LOCK(lock) | ||
| 53 | #define _write_lock(lock) __LOCK(lock) | ||
| 54 | #define _spin_lock_bh(lock) __LOCK_BH(lock) | ||
| 55 | #define _read_lock_bh(lock) __LOCK_BH(lock) | ||
| 56 | #define _write_lock_bh(lock) __LOCK_BH(lock) | ||
| 57 | #define _spin_lock_irq(lock) __LOCK_IRQ(lock) | ||
| 58 | #define _read_lock_irq(lock) __LOCK_IRQ(lock) | ||
| 59 | #define _write_lock_irq(lock) __LOCK_IRQ(lock) | ||
| 60 | #define _spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) | ||
| 61 | #define _read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) | ||
| 62 | #define _write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) | ||
| 63 | #define _spin_trylock(lock) ({ __LOCK(lock); 1; }) | ||
| 64 | #define _read_trylock(lock) ({ __LOCK(lock); 1; }) | ||
| 65 | #define _write_trylock(lock) ({ __LOCK(lock); 1; }) | ||
| 66 | #define _spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; }) | ||
| 67 | #define _spin_unlock(lock) __UNLOCK(lock) | ||
| 68 | #define _read_unlock(lock) __UNLOCK(lock) | ||
| 69 | #define _write_unlock(lock) __UNLOCK(lock) | ||
| 70 | #define _spin_unlock_bh(lock) __UNLOCK_BH(lock) | ||
| 71 | #define _write_unlock_bh(lock) __UNLOCK_BH(lock) | ||
| 72 | #define _read_unlock_bh(lock) __UNLOCK_BH(lock) | ||
| 73 | #define _spin_unlock_irq(lock) __UNLOCK_IRQ(lock) | ||
| 74 | #define _read_unlock_irq(lock) __UNLOCK_IRQ(lock) | ||
| 75 | #define _write_unlock_irq(lock) __UNLOCK_IRQ(lock) | ||
| 76 | #define _spin_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) | ||
| 77 | #define _read_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) | ||
| 78 | #define _write_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) | ||
| 79 | |||
| 80 | #endif /* __LINUX_SPINLOCK_API_UP_H */ | ||
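
Editor's note: expanded by hand, the UP-nondebug macros above show that the whole API reduces to preempt and IRQ bookkeeping plus checker annotations, which is why the trylock variants can unconditionally "succeed". A sketch of what a caller compiles down to:

    static void up_critical_section(spinlock_t *lock)
    {
        _spin_lock(lock);   /* preempt_disable(); __acquire(lock); (void)(lock); */
        /* ... nothing else can run this section concurrently: there is
         *     only one CPU and preemption is now disabled ... */
        _spin_unlock(lock); /* preempt_enable(); __release(lock); (void)(lock);  */
    }
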
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h new file mode 100644 index 000000000000..9cb51e070390 --- /dev/null +++ b/include/linux/spinlock_types.h | |||
| @@ -0,0 +1,67 @@ | |||
| 1 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
| 2 | #define __LINUX_SPINLOCK_TYPES_H | ||
| 3 | |||
| 4 | /* | ||
| 5 | * include/linux/spinlock_types.h - generic spinlock type definitions | ||
| 6 | * and initializers | ||
| 7 | * | ||
| 8 | * portions Copyright 2005, Red Hat, Inc., Ingo Molnar | ||
| 9 | * Released under the General Public License (GPL). | ||
| 10 | */ | ||
| 11 | |||
| 12 | #if defined(CONFIG_SMP) | ||
| 13 | # include <asm/spinlock_types.h> | ||
| 14 | #else | ||
| 15 | # include <linux/spinlock_types_up.h> | ||
| 16 | #endif | ||
| 17 | |||
| 18 | typedef struct { | ||
| 19 | raw_spinlock_t raw_lock; | ||
| 20 | #if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) | ||
| 21 | unsigned int break_lock; | ||
| 22 | #endif | ||
| 23 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 24 | unsigned int magic, owner_cpu; | ||
| 25 | void *owner; | ||
| 26 | #endif | ||
| 27 | } spinlock_t; | ||
| 28 | |||
| 29 | #define SPINLOCK_MAGIC 0xdead4ead | ||
| 30 | |||
| 31 | typedef struct { | ||
| 32 | raw_rwlock_t raw_lock; | ||
| 33 | #if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) | ||
| 34 | unsigned int break_lock; | ||
| 35 | #endif | ||
| 36 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 37 | unsigned int magic, owner_cpu; | ||
| 38 | void *owner; | ||
| 39 | #endif | ||
| 40 | } rwlock_t; | ||
| 41 | |||
| 42 | #define RWLOCK_MAGIC 0xdeaf1eed | ||
| 43 | |||
| 44 | #define SPINLOCK_OWNER_INIT ((void *)-1L) | ||
| 45 | |||
| 46 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 47 | # define SPIN_LOCK_UNLOCKED \ | ||
| 48 | (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ | ||
| 49 | .magic = SPINLOCK_MAGIC, \ | ||
| 50 | .owner = SPINLOCK_OWNER_INIT, \ | ||
| 51 | .owner_cpu = -1 } | ||
| 52 | #define RW_LOCK_UNLOCKED \ | ||
| 53 | (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ | ||
| 54 | .magic = RWLOCK_MAGIC, \ | ||
| 55 | .owner = SPINLOCK_OWNER_INIT, \ | ||
| 56 | .owner_cpu = -1 } | ||
| 57 | #else | ||
| 58 | # define SPIN_LOCK_UNLOCKED \ | ||
| 59 | (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED } | ||
| 60 | #define RW_LOCK_UNLOCKED \ | ||
| 61 | (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED } | ||
| 62 | #endif | ||
| 63 | |||
| 64 | #define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED | ||
| 65 | #define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED | ||
| 66 | |||
| 67 | #endif /* __LINUX_SPINLOCK_TYPES_H */ | ||
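
Editor's note: the extra magic/owner/owner_cpu fields exist only under CONFIG_DEBUG_SPINLOCK and are there so debug code can identify bad locks. A hypothetical sketch of the kind of check they enable (the real checks live outside this diff, so the function name and exact logic are assumptions):

    #ifdef CONFIG_DEBUG_SPINLOCK
    static void spin_sanity_check(spinlock_t *lock)
    {
        /* A lock that was never set up via SPIN_LOCK_UNLOCKED or
         * spin_lock_init() will not carry the magic value. */
        if (lock->magic != SPINLOCK_MAGIC)
            printk(KERN_ERR "uninitialized spinlock %p\n", lock);
    }
    #endif
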
diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h new file mode 100644 index 000000000000..def2d173a8db --- /dev/null +++ b/include/linux/spinlock_types_up.h | |||
| @@ -0,0 +1,51 @@ | |||
| 1 | #ifndef __LINUX_SPINLOCK_TYPES_UP_H | ||
| 2 | #define __LINUX_SPINLOCK_TYPES_UP_H | ||
| 3 | |||
| 4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
| 5 | # error "please don't include this file directly" | ||
| 6 | #endif | ||
| 7 | |||
| 8 | /* | ||
| 9 | * include/linux/spinlock_types_up.h - spinlock type definitions for UP | ||
| 10 | * | ||
| 11 | * portions Copyright 2005, Red Hat, Inc., Ingo Molnar | ||
| 12 | * Released under the General Public License (GPL). | ||
| 13 | */ | ||
| 14 | |||
| 15 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 16 | |||
| 17 | typedef struct { | ||
| 18 | volatile unsigned int slock; | ||
| 19 | } raw_spinlock_t; | ||
| 20 | |||
| 21 | #define __RAW_SPIN_LOCK_UNLOCKED { 1 } | ||
| 22 | |||
| 23 | #else | ||
| 24 | |||
| 25 | /* | ||
| 26 | * All gcc 2.95 versions and early versions of 2.96 have a nasty bug | ||
| 27 | * with empty initializers. | ||
| 28 | */ | ||
| 29 | #if (__GNUC__ > 2) | ||
| 30 | typedef struct { } raw_spinlock_t; | ||
| 31 | |||
| 32 | #define __RAW_SPIN_LOCK_UNLOCKED { } | ||
| 33 | #else | ||
| 34 | typedef struct { int gcc_is_buggy; } raw_spinlock_t; | ||
| 35 | #define __RAW_SPIN_LOCK_UNLOCKED (raw_spinlock_t) { 0 } | ||
| 36 | #endif | ||
| 37 | |||
| 38 | #endif | ||
| 39 | |||
| 40 | #if (__GNUC__ > 2) | ||
| 41 | typedef struct { | ||
| 42 | /* no debug version on UP */ | ||
| 43 | } raw_rwlock_t; | ||
| 44 | |||
| 45 | #define __RAW_RW_LOCK_UNLOCKED { } | ||
| 46 | #else | ||
| 47 | typedef struct { int gcc_is_buggy; } raw_rwlock_t; | ||
| 48 | #define __RAW_RW_LOCK_UNLOCKED (raw_rwlock_t) { 0 } | ||
| 49 | #endif | ||
| 50 | |||
| 51 | #endif /* __LINUX_SPINLOCK_TYPES_UP_H */ | ||
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h new file mode 100644 index 000000000000..31accf2f0b13 --- /dev/null +++ b/include/linux/spinlock_up.h | |||
| @@ -0,0 +1,74 @@ | |||
| 1 | #ifndef __LINUX_SPINLOCK_UP_H | ||
| 2 | #define __LINUX_SPINLOCK_UP_H | ||
| 3 | |||
| 4 | #ifndef __LINUX_SPINLOCK_H | ||
| 5 | # error "please don't include this file directly" | ||
| 6 | #endif | ||
| 7 | |||
| 8 | /* | ||
| 9 | * include/linux/spinlock_up.h - UP-debug version of spinlocks. | ||
| 10 | * | ||
| 11 | * portions Copyright 2005, Red Hat, Inc., Ingo Molnar | ||
| 12 | * Released under the General Public License (GPL). | ||
| 13 | * | ||
| 14 | * In the debug case, 1 means unlocked, 0 means locked. (the values | ||
| 15 | * are inverted, to catch initialization bugs) | ||
| 16 | * | ||
| 17 | * No atomicity anywhere, we are on UP. | ||
| 18 | */ | ||
| 19 | |||
| 20 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 21 | |||
| 22 | #define __raw_spin_is_locked(x) ((x)->slock == 0) | ||
| 23 | |||
| 24 | static inline void __raw_spin_lock(raw_spinlock_t *lock) | ||
| 25 | { | ||
| 26 | lock->slock = 0; | ||
| 27 | } | ||
| 28 | |||
| 29 | static inline void | ||
| 30 | __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) | ||
| 31 | { | ||
| 32 | local_irq_save(flags); | ||
| 33 | lock->slock = 0; | ||
| 34 | } | ||
| 35 | |||
| 36 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) | ||
| 37 | { | ||
| 38 | char oldval = lock->slock; | ||
| 39 | |||
| 40 | lock->slock = 0; | ||
| 41 | |||
| 42 | return oldval > 0; | ||
| 43 | } | ||
| 44 | |||
| 45 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) | ||
| 46 | { | ||
| 47 | lock->slock = 1; | ||
| 48 | } | ||
| 49 | |||
| 50 | /* | ||
| 51 | * Read-write spinlocks. No debug version. | ||
| 52 | */ | ||
| 53 | #define __raw_read_lock(lock) do { (void)(lock); } while (0) | ||
| 54 | #define __raw_write_lock(lock) do { (void)(lock); } while (0) | ||
| 55 | #define __raw_read_trylock(lock) ({ (void)(lock); 1; }) | ||
| 56 | #define __raw_write_trylock(lock) ({ (void)(lock); 1; }) | ||
| 57 | #define __raw_read_unlock(lock) do { (void)(lock); } while (0) | ||
| 58 | #define __raw_write_unlock(lock) do { (void)(lock); } while (0) | ||
| 59 | |||
| 60 | #else /* DEBUG_SPINLOCK */ | ||
| 61 | #define __raw_spin_is_locked(lock) ((void)(lock), 0) | ||
| 62 | /* for sched.c and kernel_lock.c: */ | ||
| 63 | # define __raw_spin_lock(lock) do { (void)(lock); } while (0) | ||
| 64 | # define __raw_spin_unlock(lock) do { (void)(lock); } while (0) | ||
| 65 | # define __raw_spin_trylock(lock) ({ (void)(lock); 1; }) | ||
| 66 | #endif /* DEBUG_SPINLOCK */ | ||
| 67 | |||
| 68 | #define __raw_read_can_lock(lock) (((void)(lock), 1)) | ||
| 69 | #define __raw_write_can_lock(lock) (((void)(lock), 1)) | ||
| 70 | |||
| 71 | #define __raw_spin_unlock_wait(lock) \ | ||
| 72 | do { cpu_relax(); } while (__raw_spin_is_locked(lock)) | ||
| 73 | |||
| 74 | #endif /* __LINUX_SPINLOCK_UP_H */ | ||
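
Editor's note: the inverted encoding called out in the header comment above (1 == unlocked) is what makes initialization bugs visible on UP-debug builds: static storage is zeroed, so a lock that skipped its initializer already reads as locked. A small illustration, valid under CONFIG_DEBUG_SPINLOCK:

    static raw_spinlock_t forgot_init;  /* BSS is zeroed: slock == 0 */
    static raw_spinlock_t good = __RAW_SPIN_LOCK_UNLOCKED; /* slock == 1 */

    /*
     * __raw_spin_is_locked(&forgot_init) -> true, so the missing
     *                                       initialization shows up as a
     *                                       permanently "stuck" lock
     * __raw_spin_is_locked(&good)        -> false, as expected
     */
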
diff --git a/include/linux/time.h b/include/linux/time.h index c10d4c21c183..8e83f4e778bb 100644 --- a/include/linux/time.h +++ b/include/linux/time.h | |||
| @@ -28,17 +28,10 @@ struct timezone { | |||
| 28 | #ifdef __KERNEL__ | 28 | #ifdef __KERNEL__ |
| 29 | 29 | ||
| 30 | /* Parameters used to convert the timespec values */ | 30 | /* Parameters used to convert the timespec values */ |
| 31 | #ifndef USEC_PER_SEC | 31 | #define MSEC_PER_SEC (1000L) |
| 32 | #define USEC_PER_SEC (1000000L) | 32 | #define USEC_PER_SEC (1000000L) |
| 33 | #endif | ||
| 34 | |||
| 35 | #ifndef NSEC_PER_SEC | ||
| 36 | #define NSEC_PER_SEC (1000000000L) | 33 | #define NSEC_PER_SEC (1000000000L) |
| 37 | #endif | ||
| 38 | |||
| 39 | #ifndef NSEC_PER_USEC | ||
| 40 | #define NSEC_PER_USEC (1000L) | 34 | #define NSEC_PER_USEC (1000L) |
| 41 | #endif | ||
| 42 | 35 | ||
| 43 | static __inline__ int timespec_equal(struct timespec *a, struct timespec *b) | 36 | static __inline__ int timespec_equal(struct timespec *a, struct timespec *b) |
| 44 | { | 37 | { |
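
Editor's note: with the #ifndef guards removed, the time conversion constants above become single, unconditional definitions, and they compose cleanly. A tiny sketch (the helper name is illustrative, not part of the header):

    /* Milliseconds to nanoseconds via the constants above:
     * NSEC_PER_SEC / MSEC_PER_SEC == 1000000L exactly. */
    static inline long msecs_to_nsecs(long msec)
    {
        return msec * (NSEC_PER_SEC / MSEC_PER_SEC);
    }
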
diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 542dbaee6512..343d883d69c5 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h | |||
| @@ -109,8 +109,6 @@ int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0); | |||
| 109 | int do_writepages(struct address_space *mapping, struct writeback_control *wbc); | 109 | int do_writepages(struct address_space *mapping, struct writeback_control *wbc); |
| 110 | int sync_page_range(struct inode *inode, struct address_space *mapping, | 110 | int sync_page_range(struct inode *inode, struct address_space *mapping, |
| 111 | loff_t pos, size_t count); | 111 | loff_t pos, size_t count); |
| 112 | int sync_page_range_nolock(struct inode *inode, struct address_space | ||
| 113 | *mapping, loff_t pos, size_t count); | ||
| 114 | 112 | ||
| 115 | /* pdflush.c */ | 113 | /* pdflush.c */ |
| 116 | extern int nr_pdflush_threads; /* Global so it can be exported to sysctl | 114 | extern int nr_pdflush_threads; /* Global so it can be exported to sysctl |
