| author | Tony Luck <tony.luck@intel.com> | 2008-08-01 13:13:32 -0400 |
|---|---|---|
| committer | Tony Luck <tony.luck@intel.com> | 2008-08-01 13:21:21 -0400 |
| commit | 7f30491ccd28627742e37899453ae20e3da8e18f (patch) | |
| tree | 7291c0a26ed3a31acf9542857af3981d278f5de8 /arch/ia64/include/asm/rwsem.h | |
| parent | 94ad374a0751f40d25e22e036c37f7263569d24c (diff) | |
[IA64] Move include/asm-ia64 to arch/ia64/include/asm
After moving the include files there were a few clean-ups:
1) Some files used #include <asm-ia64/xyz.h>; these were changed to
<asm/xyz.h> (see the example below).
2) Some comments alerted maintainers to look at various header files to
make matching updates if certain code were to be changed. Updated these
comments to use the new include paths.
3) Some header files mentioned their own names in initial comments. Just
deleted these self-references.
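As a concrete illustration of clean-up (1) (the header name below is just an example of the pattern, not a specific file touched by this commit):

```c
/* before the move */
#include <asm-ia64/intrinsics.h>

/* after the move */
#include <asm/intrinsics.h>
```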
Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'arch/ia64/include/asm/rwsem.h')
-rw-r--r-- | arch/ia64/include/asm/rwsem.h | 182
1 file changed, 182 insertions, 0 deletions
diff --git a/arch/ia64/include/asm/rwsem.h b/arch/ia64/include/asm/rwsem.h
new file mode 100644
index 000000000000..fbee74b15782
--- /dev/null
+++ b/arch/ia64/include/asm/rwsem.h
@@ -0,0 +1,182 @@
/*
 * R/W semaphores for ia64
 *
 * Copyright (C) 2003 Ken Chen <kenneth.w.chen@intel.com>
 * Copyright (C) 2003 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 2005 Christoph Lameter <clameter@sgi.com>
 *
 * Based on asm-i386/rwsem.h and other architecture implementations.
 *
 * The MSW of the count is the negated number of active writers and
 * waiting lockers, and the LSW is the total number of active locks.
 *
 * The lock count is initialized to 0 (no active and no waiting lockers).
 *
 * When a writer subtracts WRITE_BIAS, it'll get 0xffffffff00000001 for
 * the case of an uncontended lock. Readers increment by 1 and see a positive
 * value when uncontended, negative if there are writers (and maybe readers)
 * waiting (in which case it goes to sleep).
 */

#ifndef _ASM_IA64_RWSEM_H
#define _ASM_IA64_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
#endif

#include <linux/list.h>
#include <linux/spinlock.h>

#include <asm/intrinsics.h>

/*
 * the semaphore definition
 */
struct rw_semaphore {
	signed long		count;
	spinlock_t		wait_lock;
	struct list_head	wait_list;
};

#define RWSEM_UNLOCKED_VALUE		__IA64_UL_CONST(0x0000000000000000)
#define RWSEM_ACTIVE_BIAS		__IA64_UL_CONST(0x0000000000000001)
#define RWSEM_ACTIVE_MASK		__IA64_UL_CONST(0x00000000ffffffff)
#define RWSEM_WAITING_BIAS		-__IA64_UL_CONST(0x0000000100000000)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
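
/*
 * Worked example of the count encoding (added illustration; the values
 * follow directly from the bias constants above):
 *
 *	0x0000000000000000	unlocked
 *	0x0000000000000001	one active reader
 *	0x0000000000000003	three active readers
 *	0xffffffff00000001	one active writer, uncontended
 *				(0 + RWSEM_ACTIVE_WRITE_BIAS)
 *	0xfffffffe00000001	one active writer plus one queued waiter
 *				(each queued waiter contributes a net
 *				RWSEM_WAITING_BIAS)
 */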

#define __RWSEM_INITIALIZER(name) \
	{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
	  LIST_HEAD_INIT((name).wait_list) }

#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)

extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

static inline void
init_rwsem (struct rw_semaphore *sem)
{
	sem->count = RWSEM_UNLOCKED_VALUE;
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}

/*
 * lock for reading
 */
static inline void
__down_read (struct rw_semaphore *sem)
{
	long result = ia64_fetchadd8_acq((unsigned long *)&sem->count, 1);

	/*
	 * fetchadd returns the old count; a negative value means a
	 * writer is active or lockers are queued, so take the slow path.
	 */
	if (result < 0)
		rwsem_down_read_failed(sem);
}

/*
 * lock for writing
 */
static inline void
__down_write (struct rw_semaphore *sem)
{
	long old, new;

	do {
		old = sem->count;
		new = old + RWSEM_ACTIVE_WRITE_BIAS;
	} while (cmpxchg_acq(&sem->count, old, new) != old);

	/* any nonzero old count means the lock was held or contended */
	if (old != 0)
		rwsem_down_write_failed(sem);
}

/*
 * unlock after reading
 */
static inline void
__up_read (struct rw_semaphore *sem)
{
	long result = ia64_fetchadd8_rel((unsigned long *)&sem->count, -1);

	/*
	 * "result" is the old count, so --result yields the new one:
	 * wake waiters when some are queued (count negative) and the
	 * active part has just dropped to zero.
	 */
	if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0)
		rwsem_wake(sem);
}

/*
 * unlock after writing
 */
static inline void
__up_write (struct rw_semaphore *sem)
{
	long old, new;

	do {
		old = sem->count;
		new = old - RWSEM_ACTIVE_WRITE_BIAS;
	} while (cmpxchg_rel(&sem->count, old, new) != old);

	/*
	 * if waiters are still queued (new < 0) and nothing is active
	 * any longer, wake them.
	 */
	if (new < 0 && (new & RWSEM_ACTIVE_MASK) == 0)
		rwsem_wake(sem);
}

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
static inline int
__down_read_trylock (struct rw_semaphore *sem)
{
	long tmp;
	while ((tmp = sem->count) >= 0) {
		if (tmp == cmpxchg_acq(&sem->count, tmp, tmp+1)) {
			return 1;
		}
	}
	return 0;
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
static inline int
__down_write_trylock (struct rw_semaphore *sem)
{
	long tmp = cmpxchg_acq(&sem->count, RWSEM_UNLOCKED_VALUE,
			       RWSEM_ACTIVE_WRITE_BIAS);
	return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * downgrade write lock to read lock
 */
static inline void
__downgrade_write (struct rw_semaphore *sem)
{
	long old, new;

	/*
	 * Subtracting the (negative) RWSEM_WAITING_BIAS converts the
	 * write bias into a single read bias in one step; the slow
	 * path then wakes any readers queued behind the writer.
	 */
	do {
		old = sem->count;
		new = old - RWSEM_WAITING_BIAS;
	} while (cmpxchg_rel(&sem->count, old, new) != old);

	if (old < 0)
		rwsem_downgrade_wake(sem);
}

/*
 * Implement atomic add functionality. These used to be "inline" functions, but GCC v3.1
 * doesn't quite optimize this stuff right and ends up with bad calls to fetchandadd.
 */
#define rwsem_atomic_add(delta, sem)	atomic64_add(delta, (atomic64_t *)(&(sem)->count))
#define rwsem_atomic_update(delta, sem)	atomic64_add_return(delta, (atomic64_t *)(&(sem)->count))

static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
	return (sem->count != 0);
}

#endif /* _ASM_IA64_RWSEM_H */
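
Kernel code never includes this header directly (the #error above enforces that); it reaches these fast paths through <linux/rwsem.h> and the standard down_read()/up_read()/down_write()/up_write()/downgrade_write() wrappers. A minimal sketch of a caller, with hypothetical names (my_sem, my_data):

```c
#include <linux/rwsem.h>	/* never <asm/rwsem.h> directly */

static DECLARE_RWSEM(my_sem);	/* statically initialized rw_semaphore */
static int my_data;

int read_my_data(void)
{
	int val;

	down_read(&my_sem);	/* fast path: ia64_fetchadd8_acq of +1 */
	val = my_data;		/* any number of readers may run here */
	up_read(&my_sem);	/* fast path: ia64_fetchadd8_rel of -1 */
	return val;
}

void update_my_data(int val)
{
	down_write(&my_sem);	/* cmpxchg loop adds RWSEM_ACTIVE_WRITE_BIAS */
	my_data = val;		/* exclusive access */
	downgrade_write(&my_sem);	/* become a reader without releasing */
	/* read side continues; other readers may now enter */
	up_read(&my_sem);
}
```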