author     Christoph Lameter <clameter@sgi.com>    2005-08-22 15:20:00 -0400
committer  Tony Luck <tony.luck@intel.com>         2005-08-23 13:31:57 -0400
commit     16592d269801ea68130b283c888ffb3c3e988299
tree       b6a2666e8c2f496c20c97eea06b6e1012593b80b
parent     f6fdd7d9c273bb2a20ab467cb57067494f932fa3
[IA64] Remove rwsem limitation of 32k waiters
We ran into the limit on the maximum number of waiters at one of our sites.
This patch increases the number of possible waiters from 2^15 to 2^31 by using
a long for the counter in struct rw_semaphore. s390 and alpha already do this.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Acked-by: Kenneth Chen <kenneth.w.chen@intel.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'include')

 include/asm-ia64/rwsem.h | 35 ++++++++++++++++++-------------------
 1 file changed, 18 insertions(+), 17 deletions(-)
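
For context on the arithmetic this patch widens: the count packs the number
of active lockers into its low half, while each waiter subtracts one
RWSEM_WAITING_BIAS from its high half. Below is a minimal standalone sketch
of the overflow (ordinary userspace C, not kernel code; the bias constants
mirror the header in the patch) showing why a signed 32-bit count tops out
at 2^15 waiters while a 64-bit count allows 2^31:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        const int64_t old_waiting_bias = -0x00010000LL;  /* 32-bit layout: -2^16 */
        const int64_t new_waiting_bias = -0x100000000LL; /* 64-bit layout: -2^32 */
        const int64_t waiters = 32769;                   /* 2^15 + 1 */

        /* With a 32-bit count the waiter field is effectively a signed
         * 16-bit quantity: one waiter past 2^15 wraps the count positive
         * (two's complement), so a heavily contended lock looks free. */
        int32_t old_count = (int32_t)(old_waiting_bias * waiters);

        /* A 64-bit count has room for 2^31 waiters before the same wrap. */
        int64_t new_count = new_waiting_bias * waiters;

        printf("32-bit count, 2^15+1 waiters: %d\n", (int)old_count);
        printf("64-bit count, 2^15+1 waiters: %lld\n", (long long)new_count);
        return 0;
}

The first line prints a positive value (2147418112), i.e. the wrapped count;
the second stays negative, as a contended rw_semaphore's count should.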
diff --git a/include/asm-ia64/rwsem.h b/include/asm-ia64/rwsem.h
index 6ece5061dc19..e18b5ab0cb75 100644
--- a/include/asm-ia64/rwsem.h
+++ b/include/asm-ia64/rwsem.h
@@ -3,6 +3,7 @@
  *
  * Copyright (C) 2003 Ken Chen <kenneth.w.chen@intel.com>
  * Copyright (C) 2003 Asit Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 2005 Christoph Lameter <clameter@sgi.com>
  *
  * Based on asm-i386/rwsem.h and other architecture implementation.
  *
@@ -11,9 +12,9 @@
  *
  * The lock count is initialized to 0 (no active and no waiting lockers).
  *
- * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case
- * of an uncontended lock. Readers increment by 1 and see a positive value
- * when uncontended, negative if there are writers (and maybe) readers
+ * When a writer subtracts WRITE_BIAS, it'll get 0xffffffff00000001 for
+ * the case of an uncontended lock. Readers increment by 1 and see a positive
+ * value when uncontended, negative if there are writers (and maybe) readers
  * waiting (in which case it goes to sleep).
  */
 
@@ -29,7 +30,7 @@
  * the semaphore definition
  */
 struct rw_semaphore {
-	signed int		count;
+	signed long		count;
 	spinlock_t		wait_lock;
 	struct list_head	wait_list;
 #if RWSEM_DEBUG
@@ -37,10 +38,10 @@ struct rw_semaphore {
 #endif
 };
 
-#define RWSEM_UNLOCKED_VALUE		0x00000000
-#define RWSEM_ACTIVE_BIAS		0x00000001
-#define RWSEM_ACTIVE_MASK		0x0000ffff
-#define RWSEM_WAITING_BIAS		(-0x00010000)
+#define RWSEM_UNLOCKED_VALUE		__IA64_UL_CONST(0x0000000000000000)
+#define RWSEM_ACTIVE_BIAS		__IA64_UL_CONST(0x0000000000000001)
+#define RWSEM_ACTIVE_MASK		__IA64_UL_CONST(0x00000000ffffffff)
+#define RWSEM_WAITING_BIAS		-__IA64_UL_CONST(0x0000000100000000)
 #define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
@@ -83,7 +84,7 @@ init_rwsem (struct rw_semaphore *sem)
 static inline void
 __down_read (struct rw_semaphore *sem)
 {
-	int result = ia64_fetchadd4_acq((unsigned int *)&sem->count, 1);
+	long result = ia64_fetchadd8_acq((unsigned long *)&sem->count, 1);
 
 	if (result < 0)
 		rwsem_down_read_failed(sem);
@@ -95,7 +96,7 @@ __down_read (struct rw_semaphore *sem)
 static inline void
 __down_write (struct rw_semaphore *sem)
 {
-	int old, new;
+	long old, new;
 
 	do {
 		old = sem->count;
@@ -112,7 +113,7 @@ __down_write (struct rw_semaphore *sem)
 static inline void
 __up_read (struct rw_semaphore *sem)
 {
-	int result = ia64_fetchadd4_rel((unsigned int *)&sem->count, -1);
+	long result = ia64_fetchadd8_rel((unsigned long *)&sem->count, -1);
 
 	if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0)
 		rwsem_wake(sem);
@@ -124,7 +125,7 @@ __up_read (struct rw_semaphore *sem)
 static inline void
 __up_write (struct rw_semaphore *sem)
 {
-	int old, new;
+	long old, new;
 
 	do {
 		old = sem->count;
@@ -141,7 +142,7 @@ __up_write (struct rw_semaphore *sem)
 static inline int
 __down_read_trylock (struct rw_semaphore *sem)
 {
-	int tmp;
+	long tmp;
 	while ((tmp = sem->count) >= 0) {
 		if (tmp == cmpxchg_acq(&sem->count, tmp, tmp+1)) {
 			return 1;
@@ -156,7 +157,7 @@ __down_read_trylock (struct rw_semaphore *sem)
 static inline int
 __down_write_trylock (struct rw_semaphore *sem)
 {
-	int tmp = cmpxchg_acq(&sem->count, RWSEM_UNLOCKED_VALUE,
+	long tmp = cmpxchg_acq(&sem->count, RWSEM_UNLOCKED_VALUE,
 			      RWSEM_ACTIVE_WRITE_BIAS);
 	return tmp == RWSEM_UNLOCKED_VALUE;
 }
@@ -167,7 +168,7 @@ __down_write_trylock (struct rw_semaphore *sem)
 static inline void
 __downgrade_write (struct rw_semaphore *sem)
 {
-	int old, new;
+	long old, new;
 
 	do {
 		old = sem->count;
@@ -182,7 +183,7 @@ __downgrade_write (struct rw_semaphore *sem)
  * Implement atomic add functionality.  These used to be "inline" functions, but GCC v3.1
  * doesn't quite optimize this stuff right and ends up with bad calls to fetchandadd.
  */
-#define rwsem_atomic_add(delta, sem)	atomic_add(delta, (atomic_t *)(&(sem)->count))
-#define rwsem_atomic_update(delta, sem)	atomic_add_return(delta, (atomic_t *)(&(sem)->count))
+#define rwsem_atomic_add(delta, sem)	atomic64_add(delta, (atomic64_t *)(&(sem)->count))
+#define rwsem_atomic_update(delta, sem)	atomic64_add_return(delta, (atomic64_t *)(&(sem)->count))
 
 #endif /* _ASM_IA64_RWSEM_H */
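
Note that the hunks above change only the declarations; the retry-loop bodies
of __down_write(), __up_write() and __downgrade_write() are unchanged and
therefore not shown. For readers unfamiliar with the pattern, here is a hedged
sketch of such a loop in ordinary C, using the GCC/Clang __atomic builtins in
place of the kernel's cmpxchg_acq() and assuming a 64-bit long as on ia64; the
helper name down_write_fastpath() is invented for illustration, not part of
the header:

#include <stdbool.h>

#define RWSEM_ACTIVE_BIAS       1L
#define RWSEM_WAITING_BIAS      (-0x100000000L)  /* needs 64-bit long */
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

/* Returns true if the lock was uncontended (old count was zero); in the
 * real header, old != 0 sends the caller to the slow path,
 * rwsem_down_write_failed(). */
static bool down_write_fastpath(long *count)
{
        long old = __atomic_load_n(count, __ATOMIC_RELAXED);
        long new;

        do {
                new = old + RWSEM_ACTIVE_WRITE_BIAS;
                /* On failure the builtin reloads 'old' with the current
                 * value, so the loop simply retries with fresh data. */
        } while (!__atomic_compare_exchange_n(count, &old, new, false,
                                              __ATOMIC_ACQUIRE,
                                              __ATOMIC_RELAXED));

        return old == 0;
}

The acquire ordering on the successful exchange mirrors the acquire semantics
of cmpxchg_acq(): accesses inside the critical section cannot be reordered
before the lock is taken.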