author    Benjamin Herrenschmidt <benh@kernel.crashing.org>  2010-08-24 00:41:48 -0400
committer Benjamin Herrenschmidt <benh@kernel.crashing.org>  2010-08-24 00:41:48 -0400
commit    529b7307d804f649839b5b65b303442140266d26 (patch)
tree      1fb7edf7770e27d1e596a1d0aa2a64c1b986938d /arch/powerpc
parent    b1515af2911c4339ed34865a0594f4ad3392921a (diff)
powerpc: Make rwsem use "long" type
This makes the 64-bit kernel use 64-bit signed integers for the counter
(effectively supporting 32 bits of active count in the semaphore), thus
avoiding things like overflow of the mmap_sem if you use a really crazy
number of threads.

Note: Ideally the type in the structure should be atomic_long_t rather
than "long". However, there are some nasty issues with that. It needs to
be initialized statically -and- lib/rwsem.c does things like

	sem->count = RWSEM_UNLOCKED_VALUE;

Now, if you mix in the fact that atomic_* types are actually structures
with one member and not typedefs of a scalar, it gets really nasty. So I
stuck with what we did before: using a long and casts, for now.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
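[Editor's aside: a minimal standalone sketch of the initialization problem the
note describes. Nothing below is part of the patch; the one-member struct
mirrors the usual kernel definition of atomic_long_t, and the two rwsem_*
layouts are hypothetical, for comparison only.]

	#include <stdio.h>

	/* Simplified stand-in for the kernel's atomic_long_t:
	 * a one-member struct, not a typedef of a scalar. */
	typedef struct {
		long counter;
	} atomic_long_t;

	#define RWSEM_UNLOCKED_VALUE	0x00000000L

	/* Two hypothetical semaphore layouts, for comparison only. */
	struct rwsem_scalar { long count; };
	struct rwsem_atomic { atomic_long_t count; };

	/* Static init: the scalar form takes a bare constant ... */
	static struct rwsem_scalar a = { RWSEM_UNLOCKED_VALUE };
	/* ... while the struct form needs an extra brace level. */
	static struct rwsem_atomic b = { { RWSEM_UNLOCKED_VALUE } };

	int main(void)
	{
		a.count = RWSEM_UNLOCKED_VALUE;   /* plain scalar store: fine */
		/* b.count = RWSEM_UNLOCKED_VALUE;   would not compile: struct */
		b.count.counter = RWSEM_UNLOCKED_VALUE;  /* must reach inside */
		printf("%ld %ld\n", a.count, b.count.counter);
		return 0;
	}

Keeping the field a plain long and casting to atomic_long_t * at the call
sites sidesteps both problems, at the cost of the casts visible throughout
the diff below.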
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/rwsem.h | 64
1 file changed, 37 insertions(+), 27 deletions(-)
diff --git a/arch/powerpc/include/asm/rwsem.h b/arch/powerpc/include/asm/rwsem.h
index 24cd9281ec37..8447d89fbe72 100644
--- a/arch/powerpc/include/asm/rwsem.h
+++ b/arch/powerpc/include/asm/rwsem.h
@@ -21,15 +21,20 @@
 /*
  * the semaphore definition
  */
-struct rw_semaphore {
-	/* XXX this should be able to be an atomic_t -- paulus */
-	signed int		count;
-#define RWSEM_UNLOCKED_VALUE		0x00000000
-#define RWSEM_ACTIVE_BIAS		0x00000001
-#define RWSEM_ACTIVE_MASK		0x0000ffff
-#define RWSEM_WAITING_BIAS		(-0x00010000)
+#ifdef CONFIG_PPC64
+# define RWSEM_ACTIVE_MASK		0xffffffffL
+#else
+# define RWSEM_ACTIVE_MASK		0x0000ffffL
+#endif
+
+#define RWSEM_UNLOCKED_VALUE		0x00000000L
+#define RWSEM_ACTIVE_BIAS		0x00000001L
+#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
 #define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+
+struct rw_semaphore {
+	long			count;
 	spinlock_t		wait_lock;
 	struct list_head	wait_list;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -43,9 +48,13 @@ struct rw_semaphore {
 # define __RWSEM_DEP_MAP_INIT(lockname)
 #endif
 
-#define __RWSEM_INITIALIZER(name) \
-	{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
-	  LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
+#define __RWSEM_INITIALIZER(name)			\
+{							\
+	RWSEM_UNLOCKED_VALUE,				\
+	__SPIN_LOCK_UNLOCKED((name).wait_lock),		\
+	LIST_HEAD_INIT((name).wait_list)		\
+	__RWSEM_DEP_MAP_INIT(name)			\
+}
 
 #define DECLARE_RWSEM(name)		\
 	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
@@ -70,13 +79,13 @@ extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
  */
 static inline void __down_read(struct rw_semaphore *sem)
 {
-	if (unlikely(atomic_inc_return((atomic_t *)(&sem->count)) <= 0))
+	if (unlikely(atomic_long_inc_return((atomic_long_t *)&sem->count) <= 0))
 		rwsem_down_read_failed(sem);
 }
 
 static inline int __down_read_trylock(struct rw_semaphore *sem)
 {
-	int tmp;
+	long tmp;
 
 	while ((tmp = sem->count) >= 0) {
 		if (tmp == cmpxchg(&sem->count, tmp,
@@ -92,10 +101,10 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
  */
 static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
-	int tmp;
+	long tmp;
 
-	tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
-				(atomic_t *)(&sem->count));
+	tmp = atomic_long_add_return(RWSEM_ACTIVE_WRITE_BIAS,
+				     (atomic_long_t *)&sem->count);
 	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
 		rwsem_down_write_failed(sem);
 }
@@ -107,7 +116,7 @@ static inline void __down_write(struct rw_semaphore *sem)
 
 static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
-	int tmp;
+	long tmp;
 
 	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
 		      RWSEM_ACTIVE_WRITE_BIAS);
@@ -119,9 +128,9 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
  */
 static inline void __up_read(struct rw_semaphore *sem)
 {
-	int tmp;
+	long tmp;
 
-	tmp = atomic_dec_return((atomic_t *)(&sem->count));
+	tmp = atomic_long_dec_return((atomic_long_t *)&sem->count);
 	if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
 		rwsem_wake(sem);
 }
@@ -131,17 +140,17 @@ static inline void __up_read(struct rw_semaphore *sem)
131 */ 140 */
132static inline void __up_write(struct rw_semaphore *sem) 141static inline void __up_write(struct rw_semaphore *sem)
133{ 142{
134 if (unlikely(atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS, 143 if (unlikely(atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
135 (atomic_t *)(&sem->count)) < 0)) 144 (atomic_long_t *)&sem->count) < 0))
136 rwsem_wake(sem); 145 rwsem_wake(sem);
137} 146}
138 147
139/* 148/*
140 * implement atomic add functionality 149 * implement atomic add functionality
141 */ 150 */
142static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem) 151static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
143{ 152{
144 atomic_add(delta, (atomic_t *)(&sem->count)); 153 atomic_long_add(delta, (atomic_long_t *)&sem->count);
145} 154}
146 155
147/* 156/*
@@ -149,9 +158,10 @@ static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
  */
 static inline void __downgrade_write(struct rw_semaphore *sem)
 {
-	int tmp;
+	long tmp;
 
-	tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
+	tmp = atomic_long_add_return(-RWSEM_WAITING_BIAS,
+				     (atomic_long_t *)&sem->count);
 	if (tmp < 0)
 		rwsem_downgrade_wake(sem);
 }
@@ -159,14 +169,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 /*
  * implement exchange and add functionality
  */
-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
+static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
 {
-	return atomic_add_return(delta, (atomic_t *)(&sem->count));
+	return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
 }
 
 static inline int rwsem_is_locked(struct rw_semaphore *sem)
 {
-	return (sem->count != 0);
+	return sem->count != 0;
 }
 
 #endif /* __KERNEL__ */
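[Editor's aside: a small userspace sketch of the counter arithmetic the new
constants set up, assuming a 64-bit long as on ppc64. The macro values are
copied from the first hunk above; the rest of the code is illustration only
and appears nowhere in the patch.]

	#include <stdio.h>

	/* The ppc64 values from the hunk above. */
	#define RWSEM_ACTIVE_MASK	0xffffffffL
	#define RWSEM_ACTIVE_BIAS	0x00000001L
	#define RWSEM_WAITING_BIAS	(-RWSEM_ACTIVE_MASK-1)
	#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

	int main(void)
	{
		long count = 0;

		/* A writer adds the write bias: the waiting flag goes
		 * negative while the low bits record one active holder. */
		count += RWSEM_ACTIVE_WRITE_BIAS;
		printf("after write lock: %#lx (active = %ld)\n",
		       (unsigned long)count, count & RWSEM_ACTIVE_MASK);

		/* With the 32-bit active mask, 65536 readers still count
		 * correctly; under the old 0x0000ffff mask the same value
		 * would alias to an active count of 0. */
		count = 0x10000 * RWSEM_ACTIVE_BIAS;	/* 65536 readers */
		printf("65536 readers: active = %ld\n",
		       count & RWSEM_ACTIVE_MASK);
		return 0;
	}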