Diffstat (limited to 'arch/xtensa/kernel/semaphore.c')
 arch/xtensa/kernel/semaphore.c | 226
 1 file changed, 226 insertions(+), 0 deletions(-)
diff --git a/arch/xtensa/kernel/semaphore.c b/arch/xtensa/kernel/semaphore.c
new file mode 100644
index 000000000000..d40f4b1b75ac
--- /dev/null
+++ b/arch/xtensa/kernel/semaphore.c
@@ -0,0 +1,226 @@
+/*
+ * arch/xtensa/kernel/semaphore.c
+ *
+ * Generic semaphore code.  Buyer beware.  Do your own specific changes
+ * in <asm/semaphore-helper.h>.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ * Chris Zankel <chris@zankel.net>
+ * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
+ * Kevin Chea
+ */
+
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/init.h>
+#include <asm/semaphore.h>
+#include <asm/errno.h>
+
+/*
+ * These two _must_ execute atomically wrt each other.
+ */
+
+static __inline__ void wake_one_more(struct semaphore *sem)
+{
+	atomic_inc((atomic_t *)&sem->sleepers);
+}
+
+static __inline__ int waking_non_zero(struct semaphore *sem)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&semaphore_wake_lock, flags);
+	if (sem->sleepers > 0) {
+		sem->sleepers--;
+		ret = 1;
+	}
+	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
+	return ret;
+}
+
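+/*
+ * Illustrative note (not part of the original code): if the test and
+ * decrement in waking_non_zero() were not serialized by
+ * semaphore_wake_lock, two waiters racing after a single up() could
+ * both observe sleepers == 1 and both gate through -- two tasks
+ * passing on one wakeup.  The spinlock makes the check a single-shot
+ * test-and-decrement.
+ */
+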
+/*
+ * waking_non_zero_interruptible:
+ *	1	got the lock
+ *	0	go to sleep
+ *	-EINTR	interrupted
+ *
+ * We must undo the sem->count decrement made by down_interruptible()
+ * while we hold the spinlock, so that the count adjustment is atomic
+ * with the sem->sleepers increment in wake_one_more(); otherwise we
+ * can race. -arca
+ */
+
+static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
+						    struct task_struct *tsk)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&semaphore_wake_lock, flags);
+	if (sem->sleepers > 0) {
+		sem->sleepers--;
+		ret = 1;
+	} else if (signal_pending(tsk)) {
+		atomic_inc(&sem->count);
+		ret = -EINTR;
+	}
+	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
+	return ret;
+}
+
+/*
+ * waking_non_zero_trylock:
+ *	1	failed to lock
+ *	0	got the lock
+ *
+ * We must undo the sem->count decrement made by down_trylock() while
+ * we hold the spinlock, so that the count adjustment is atomic with
+ * the sem->sleepers increment in wake_one_more(); otherwise we can
+ * race. -arca
+ */
+
+static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
+{
+	unsigned long flags;
+	int ret = 1;
+
+	spin_lock_irqsave(&semaphore_wake_lock, flags);
+	if (sem->sleepers <= 0)
+		atomic_inc(&sem->count);
+	else {
+		sem->sleepers--;
+		ret = 0;
+	}
+	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
+	return ret;
+}
+
+DEFINE_SPINLOCK(semaphore_wake_lock);
+
+/*
+ * Semaphores are implemented using a two-way counter:
+ * the "count" variable is decremented for each process
+ * that tries to sleep, while the "sleepers" variable is
+ * incremented when the "up()" code goes to wake up waiting
+ * processes.
+ *
+ * Notably, the inline "up()" and "down()" functions can
+ * efficiently test if they need to do any extra work (up
+ * needs to do something only if count was negative before
+ * the increment operation).
+ *
+ * waking_non_zero() (above) must execute atomically.
+ *
+ * When __up() is called, the count was negative before
+ * incrementing it, and we need to wake up somebody.
+ *
+ * This routine adds one to the count of processes that need to
+ * wake up and exit.  ALL waiting processes actually wake up but
+ * only the one that gets to the "sleepers" field first will gate
+ * through and acquire the semaphore.  The others will go back
+ * to sleep.
+ *
+ * Note that these functions are only called when there is
+ * contention on the lock, and as such all this is the
+ * "non-critical" part of the whole semaphore business.  The
+ * critical part is the inline stuff in <asm/semaphore.h>
+ * where we want to avoid any extra jumps and calls.
+ */
+
+void __up(struct semaphore *sem)
+{
+	wake_one_more(sem);
+	wake_up(&sem->wait);
+}
+
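+/*
+ * Worked example (illustrative; assumes the usual fast paths in
+ * <asm/semaphore.h>, where down() decrements count and up()
+ * increments it): start with count == 1 and two tasks calling down().
+ * The first takes count to 0 and proceeds.  The second takes count to
+ * -1 and sleeps in __down().  A later up() raises count back to 0
+ * and, because count was negative, calls __up(): wake_one_more()
+ * bumps sleepers and wake_up() wakes the wait queue.  The sleeper
+ * then claims the wakeup in waking_non_zero() and returns.
+ */
+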
+/*
+ * Perform the "down" function.  Return zero for semaphore acquired,
+ * return negative if the task was signalled out of the wait.
+ *
+ * If called from __down, the return is ignored and the wait loop is
+ * not interruptible.  This means that a task waiting on a semaphore
+ * using "down()" cannot be killed until someone does an "up()" on
+ * the semaphore.
+ *
+ * If called from __down_interruptible, the return value gets checked
+ * upon return.  If the return value is negative then the task continues
+ * with the negative value in the return register (it can be tested by
+ * the caller).
+ *
+ * Either form may be used in conjunction with "up()".
+ */
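+
+/*
+ * Usage sketch (illustrative only; example_sem is a hypothetical
+ * semaphore created with the era's DECLARE_MUTEX(), count == 1):
+ *
+ *	static DECLARE_MUTEX(example_sem);
+ *
+ *	down(&example_sem);
+ *	... critical section; the wait is not killable ...
+ *	up(&example_sem);
+ *
+ *	if (down_interruptible(&example_sem))
+ *		return -EINTR;
+ *	... critical section; the wait can be interrupted ...
+ *	up(&example_sem);
+ */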
+
+#define DOWN_VAR							\
+	struct task_struct *tsk = current;				\
+	wait_queue_t wait;						\
+	init_waitqueue_entry(&wait, tsk);
+
+#define DOWN_HEAD(task_state)						\
+	tsk->state = (task_state);					\
+	add_wait_queue(&sem->wait, &wait);				\
+									\
+	/*								\
+	 * Ok, we're set up.  sem->count is known to be less than zero	\
+	 * so we must wait.						\
+	 *								\
+	 * We can let go the lock for purposes of waiting.		\
+	 * We re-acquire it after awaking so as to protect		\
+	 * all semaphore operations.					\
+	 *								\
+	 * If "up()" is called before we call waking_non_zero() then	\
+	 * we will catch it right away.  If it is called later then	\
+	 * we will have to go through a wakeup cycle to catch it.	\
+	 *								\
+	 * Multiple waiters contend for the semaphore lock to see	\
+	 * who gets to gate through and who has to wait some more.	\
+	 */								\
+	for (;;) {
+
+#define DOWN_TAIL(task_state)						\
+		tsk->state = (task_state);				\
+	}								\
+	tsk->state = TASK_RUNNING;					\
+	remove_wait_queue(&sem->wait, &wait);
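+
+/*
+ * For reference (illustrative expansion, not generated output): with
+ * the macros above, __down() below is roughly equivalent to
+ *
+ *	struct task_struct *tsk = current;
+ *	wait_queue_t wait;
+ *	init_waitqueue_entry(&wait, tsk);
+ *	tsk->state = TASK_UNINTERRUPTIBLE;
+ *	add_wait_queue(&sem->wait, &wait);
+ *	for (;;) {
+ *		if (waking_non_zero(sem))
+ *			break;
+ *		schedule();
+ *		tsk->state = TASK_UNINTERRUPTIBLE;
+ *	}
+ *	tsk->state = TASK_RUNNING;
+ *	remove_wait_queue(&sem->wait, &wait);
+ */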
+
+void __sched __down(struct semaphore *sem)
+{
+	DOWN_VAR
+	DOWN_HEAD(TASK_UNINTERRUPTIBLE)
+	if (waking_non_zero(sem))
+		break;
+	schedule();
+	DOWN_TAIL(TASK_UNINTERRUPTIBLE)
+}
+
+int __sched __down_interruptible(struct semaphore *sem)
+{
+	int ret = 0;
+	DOWN_VAR
+	DOWN_HEAD(TASK_INTERRUPTIBLE)
+
+	ret = waking_non_zero_interruptible(sem, tsk);
+	if (ret) {
+		if (ret == 1)
+			/* Got the semaphore; ret stays nonzero only
+			 * when we were interrupted. -arca */
+			ret = 0;
+		break;
+	}
+	schedule();
+	DOWN_TAIL(TASK_INTERRUPTIBLE)
+	return ret;
+}
+
+int __down_trylock(struct semaphore *sem)
+{
+	return waking_non_zero_trylock(sem);
+}