Diffstat (limited to 'arch/xtensa/include/asm/rwsem.h')
-rw-r--r-- | arch/xtensa/include/asm/rwsem.h | 168
1 file changed, 168 insertions, 0 deletions
diff --git a/arch/xtensa/include/asm/rwsem.h b/arch/xtensa/include/asm/rwsem.h
new file mode 100644
index 000000000000..e39edf5c86f2
--- /dev/null
+++ b/arch/xtensa/include/asm/rwsem.h
@@ -0,0 +1,168 @@
/*
 * include/asm-xtensa/rwsem.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Largely copied from include/asm-ppc/rwsem.h
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_RWSEM_H
#define _XTENSA_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
#endif

#include <linux/list.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/system.h>

/*
 * the semaphore definition
 */
struct rw_semaphore {
	signed long		count;
#define RWSEM_UNLOCKED_VALUE		0x00000000
#define RWSEM_ACTIVE_BIAS		0x00000001
#define RWSEM_ACTIVE_MASK		0x0000ffff
#define RWSEM_WAITING_BIAS		(-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
	spinlock_t		wait_lock;
	struct list_head	wait_list;
};
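
/*
 * The layout of the count makes each fast path a single atomic operation:
 * the low 16 bits (RWSEM_ACTIVE_MASK) count active lockers, while the upper
 * bits are pulled negative by RWSEM_WAITING_BIAS while tasks are queued on
 * wait_list.  A reader adds RWSEM_ACTIVE_READ_BIAS (+1); a writer adds
 * RWSEM_ACTIVE_WRITE_BIAS, i.e. one active locker plus the waiting bias, so
 * a writer can tell from the returned sum whether it found any contention.
 */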

#define __RWSEM_INITIALIZER(name) \
	{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
	  LIST_HEAD_INIT((name).wait_list) }

#define DECLARE_RWSEM(name)		\
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)

extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
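
/*
 * These are the slow-path entry points provided by the generic rwsem code
 * in lib/rwsem.c, which queues the caller on wait_list under wait_lock and
 * adjusts the count through the rwsem_atomic_*() helpers defined below.
 */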

static inline void init_rwsem(struct rw_semaphore *sem)
{
	sem->count = RWSEM_UNLOCKED_VALUE;
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}

/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	if (atomic_add_return(1,(atomic_t *)(&sem->count)) > 0)
		smp_wmb();
	else
		rwsem_down_read_failed(sem);
}
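
/*
 * A positive result from the unconditional increment means no writer is
 * active or queued, so the read lock is held as soon as the barrier
 * completes; otherwise rwsem_down_read_failed() blocks until the lock is
 * granted to this reader.
 */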

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	int tmp;

	while ((tmp = sem->count) >= 0) {
		if (tmp == cmpxchg(&sem->count, tmp,
				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
			smp_wmb();
			return 1;
		}
	}
	return 0;
}
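
/*
 * The read trylock retries the cmpxchg only while the count stays
 * non-negative, i.e. while no writer is active or queued; it returns 0 as
 * soon as taking the lock would mean waiting.
 */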

/*
 * lock for writing
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	int tmp;

	tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
				(atomic_t *)(&sem->count));
	if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
		smp_wmb();
	else
		rwsem_down_write_failed(sem);
}
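
/*
 * A result of exactly RWSEM_ACTIVE_WRITE_BIAS means the count was zero
 * before the add, i.e. the semaphore was completely unlocked and the write
 * lock is now held; any other value means readers, a writer or waiters
 * were present and rwsem_down_write_failed() must block.
 */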

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	int tmp;

	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
		      RWSEM_ACTIVE_WRITE_BIAS);
	smp_wmb();
	return tmp == RWSEM_UNLOCKED_VALUE;
}
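
/*
 * The write trylock can only succeed from the fully unlocked state: the
 * single cmpxchg from RWSEM_UNLOCKED_VALUE to RWSEM_ACTIVE_WRITE_BIAS
 * either claims the lock outright or fails without changing the count.
 */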

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	int tmp;

	smp_wmb();
	tmp = atomic_sub_return(1,(atomic_t *)(&sem->count));
	if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
		rwsem_wake(sem);
}
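
/*
 * After dropping the read bias, a negative count whose active mask is zero
 * means tasks are queued on the wait list and this was the last active
 * locker, so it must call rwsem_wake() to hand the lock to a waiter.
 */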

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	smp_wmb();
	if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
			      (atomic_t *)(&sem->count)) < 0)
		rwsem_wake(sem);
}
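
/*
 * Removing RWSEM_ACTIVE_WRITE_BIAS returns the count to zero when nobody
 * queued up behind the writer; a negative result means waiters are still
 * on the list and rwsem_wake() must grant them the lock.
 */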

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
	atomic_add(delta, (atomic_t *)(&sem->count));
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	int tmp;

	smp_wmb();
	tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
	if (tmp < 0)
		rwsem_downgrade_wake(sem);
}
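
/*
 * Adding -RWSEM_WAITING_BIAS converts the writer's bias into a plain
 * single-reader count; if the result is still negative, other tasks are
 * queued and rwsem_downgrade_wake() lets waiting readers in alongside the
 * downgraded holder.
 */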

/*
 * implement exchange and add functionality
 */
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
	smp_mb();
	return atomic_add_return(delta, (atomic_t *)(&sem->count));
}

static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
	return (sem->count != 0);
}

#endif	/* _XTENSA_RWSEM_H */
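
As the #error near the top enforces, kernel code never includes this header directly; it reaches these fast paths through the generic wrappers in <linux/rwsem.h>. A minimal usage sketch follows; the semaphore, the protected variable and the function names are illustrative and not part of this patch:

	#include <linux/rwsem.h>

	static DECLARE_RWSEM(example_sem);	/* hypothetical rwsem */
	static int example_value;		/* data it protects */

	int example_read(void)
	{
		int v;

		down_read(&example_sem);	/* ends up in __down_read() */
		v = example_value;
		up_read(&example_sem);		/* ends up in __up_read() */
		return v;
	}

	void example_write(int v)
	{
		down_write(&example_sem);	/* ends up in __down_write() */
		example_value = v;
		up_write(&example_sem);		/* ends up in __up_write() */
	}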