Diffstat (limited to 'drivers/hwspinlock')
-rw-r--r--  drivers/hwspinlock/Kconfig                 |  13
-rw-r--r--  drivers/hwspinlock/Makefile                |   5
-rw-r--r--  drivers/hwspinlock/hwspinlock_core.c       | 548
-rw-r--r--  drivers/hwspinlock/hwspinlock_internal.h   |  61
4 files changed, 627 insertions, 0 deletions
diff --git a/drivers/hwspinlock/Kconfig b/drivers/hwspinlock/Kconfig
new file mode 100644
index 000000000000..9dd8db46606b
--- /dev/null
+++ b/drivers/hwspinlock/Kconfig
@@ -0,0 +1,13 @@
#
# Generic HWSPINLOCK framework
#

config HWSPINLOCK
        tristate "Generic Hardware Spinlock framework"
        help
          Say y here to support the generic hardware spinlock framework.
          You only need to enable this if you have a hardware spinlock
          module on your system (usually only relevant if your system has
          remote slave coprocessors).

          If unsure, say N.
diff --git a/drivers/hwspinlock/Makefile b/drivers/hwspinlock/Makefile
new file mode 100644
index 000000000000..b9d2b9f40491
--- /dev/null
+++ b/drivers/hwspinlock/Makefile
@@ -0,0 +1,5 @@
#
# Generic Hardware Spinlock framework
#

obj-$(CONFIG_HWSPINLOCK)        += hwspinlock_core.o
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
new file mode 100644
index 000000000000..43a62714b4fb
--- /dev/null
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -0,0 +1,548 @@
/*
 * Hardware spinlock framework
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt)    "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/radix-tree.h>
#include <linux/hwspinlock.h>
#include <linux/pm_runtime.h>

#include "hwspinlock_internal.h"
/* radix tree tags */
#define HWSPINLOCK_UNUSED        (0) /* tags an hwspinlock as unused */

/*
 * A radix tree is used to maintain the available hwspinlock instances.
 * The tree associates hwspinlock pointers with their integer key id,
 * and provides an easy-to-use API which keeps the hwspinlock core code
 * simple and easy to read.
 *
 * Radix trees are quick on lookups, and reasonably efficient in terms of
 * storage, especially with high density usages such as this framework
 * requires (a continuous range of integer keys, beginning with zero, is
 * used as the IDs of the hwspinlock instances).
 *
 * The radix tree API supports tagging items in the tree, which this
 * framework uses to mark unused hwspinlock instances (see the
 * HWSPINLOCK_UNUSED tag above). As a result, the process of querying the
 * tree, looking for an unused hwspinlock instance, is reduced to a
 * single radix tree API call.
 */
static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);

/*
 * Synchronization of access to the tree is achieved using this spinlock,
 * as the radix-tree API requires that users provide all synchronization.
 */
static DEFINE_SPINLOCK(hwspinlock_tree_lock);
/**
 * __hwspin_trylock() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 * @mode: controls whether local interrupts are disabled or not
 * @flags: a pointer where the caller's interrupt state will be saved (if
 *         requested)
 *
 * This function attempts to lock an hwspinlock, and will immediately
 * fail if the hwspinlock is already taken.
 *
 * Upon a successful return from this function, preemption (and possibly
 * interrupts) is disabled, so the caller must not sleep, and is advised to
 * release the hwspinlock as soon as possible. This is required in order to
 * minimize remote cores polling on the hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if yes,
 * whether their previous state should be saved. It is up to the user
 * to choose the appropriate @mode of operation, exactly the same way users
 * should decide between spin_trylock, spin_trylock_irq and
 * spin_trylock_irqsave.
 *
 * Returns 0 if we successfully locked the hwspinlock or -EBUSY if
 * the hwspinlock was already taken.
 * This function will never sleep.
 */
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
        int ret;

        BUG_ON(!hwlock);
        BUG_ON(!flags && mode == HWLOCK_IRQSTATE);

        /*
         * This spin_lock{_irq, _irqsave} serves three purposes:
         *
         * 1. Disable preemption, in order to minimize the period of time
         *    in which the hwspinlock is taken. This is important in order
         *    to minimize the possible polling on the hardware interconnect
         *    by a remote user of this lock.
         * 2. Make the hwspinlock SMP-safe (so we can take it from
         *    additional contexts on the local host).
         * 3. Ensure that in_atomic/might_sleep checks catch potential
         *    problems with hwspinlock usage (e.g. scheduler checks like
         *    'scheduling while atomic' etc.)
         */
        if (mode == HWLOCK_IRQSTATE)
                ret = spin_trylock_irqsave(&hwlock->lock, *flags);
        else if (mode == HWLOCK_IRQ)
                ret = spin_trylock_irq(&hwlock->lock);
        else
                ret = spin_trylock(&hwlock->lock);

        /* is lock already taken by another context on the local CPU? */
        if (!ret)
                return -EBUSY;

        /* try to take the hwspinlock device */
        ret = hwlock->ops->trylock(hwlock);

        /* if hwlock is already taken, undo spin_trylock_* and exit */
        if (!ret) {
                if (mode == HWLOCK_IRQSTATE)
                        spin_unlock_irqrestore(&hwlock->lock, *flags);
                else if (mode == HWLOCK_IRQ)
                        spin_unlock_irq(&hwlock->lock);
                else
                        spin_unlock(&hwlock->lock);

                return -EBUSY;
        }

        /*
         * We can be sure the other core's memory operations
         * are observable to us only _after_ we successfully take
         * the hwspinlock, and we must make sure that subsequent memory
         * operations (both reads and writes) will not be reordered before
         * we actually took the hwspinlock.
         *
         * Note: the implicit memory barrier of the spinlock above is too
         * early, so we need this additional explicit memory barrier.
         */
        mb();

        return 0;
}
EXPORT_SYMBOL_GPL(__hwspin_trylock);
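
For illustration, a minimal single-attempt usage sketch (not part of this patch). It assumes the hwspin_trylock_irqsave()/hwspin_unlock_irqrestore() wrappers that <linux/hwspinlock.h> layers on top of __hwspin_trylock()/__hwspin_unlock():

#include <linux/hwspinlock.h>

static int update_shared_counter(struct hwspinlock *hwlock, u32 *counter)
{
        unsigned long flags;
        int ret;

        /* one attempt only; on success, local interrupts are disabled */
        ret = hwspin_trylock_irqsave(hwlock, &flags);
        if (ret)
                return ret;        /* -EBUSY: lock is held elsewhere */

        (*counter)++;        /* keep the critical section short */

        hwspin_unlock_irqrestore(hwlock, &flags);
        return 0;
}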
/**
 * __hwspin_lock_timeout() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 * @mode: mode which controls whether local interrupts are disabled or not
 * @flags: a pointer where the caller's interrupt state will be saved (if
 *         requested)
 *
 * This function locks the given @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up after @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption is disabled
 * (and possibly local interrupts, too), so the caller must not sleep,
 * and is advised to release the hwspinlock as soon as possible.
 * This is required in order to minimize remote cores polling on the
 * hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if yes,
 * whether their previous state should be saved. It is up to the user
 * to choose the appropriate @mode of operation, exactly the same way users
 * should decide between spin_lock, spin_lock_irq and spin_lock_irqsave.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
                                        int mode, unsigned long *flags)
{
        int ret;
        unsigned long expire;

        expire = msecs_to_jiffies(to) + jiffies;

        for (;;) {
                /* Try to take the hwspinlock */
                ret = __hwspin_trylock(hwlock, mode, flags);
                if (ret != -EBUSY)
                        break;

                /*
                 * The lock is already taken, let's check if the user wants
                 * us to try again
                 */
                if (time_is_before_eq_jiffies(expire))
                        return -ETIMEDOUT;

                /*
                 * Allow platform-specific relax handlers to prevent
                 * hogging the interconnect (no sleeping, though)
                 */
                if (hwlock->ops->relax)
                        hwlock->ops->relax(hwlock);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(__hwspin_lock_timeout);
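
A timeout-based sketch of the same pattern (illustrative; the hwspin_lock_timeout_irqsave() wrapper name is again assumed from <linux/hwspinlock.h>):

#include <linux/hwspinlock.h>

static int poke_shared_state(struct hwspinlock *hwlock, u32 *shared)
{
        unsigned long flags;
        int ret;

        /* busy-wait for up to 100 msecs, saving local interrupt state */
        ret = hwspin_lock_timeout_irqsave(hwlock, 100, &flags);
        if (ret)
                return ret;        /* most notably -ETIMEDOUT */

        *shared = 0xdead;        /* critical section */

        hwspin_unlock_irqrestore(hwlock, &flags);
        return 0;
}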
/**
 * __hwspin_unlock() - unlock a specific hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 * @mode: controls whether local interrupts need to be restored or not
 * @flags: previous caller's interrupt state to restore (if requested)
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * (possibly) enable interrupts or restore their previous state.
 * @hwlock must be already locked before calling this function: it is a bug
 * to call unlock on a @hwlock that is already unlocked.
 *
 * The user decides whether local interrupts should be enabled or not, and
 * if yes, whether their previous state should be restored. It is up
 * to the user to choose the appropriate @mode of operation, exactly the
 * same way users decide between spin_unlock, spin_unlock_irq and
 * spin_unlock_irqrestore.
 *
 * The function will never sleep.
 */
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
        BUG_ON(!hwlock);
        BUG_ON(!flags && mode == HWLOCK_IRQSTATE);

        /*
         * We must make sure that memory operations (both reads and writes),
         * done before unlocking the hwspinlock, will not be reordered
         * after the lock is released.
         *
         * That's the purpose of this explicit memory barrier.
         *
         * Note: the memory barrier induced by the spin_unlock below is too
         * late; the other core is going to access memory soon after it will
         * take the hwspinlock, and by then we want to be sure our memory
         * operations are already observable.
         */
        mb();

        hwlock->ops->unlock(hwlock);

        /* Undo the spin_trylock{_irq, _irqsave} called while locking */
        if (mode == HWLOCK_IRQSTATE)
                spin_unlock_irqrestore(&hwlock->lock, *flags);
        else if (mode == HWLOCK_IRQ)
                spin_unlock_irq(&hwlock->lock);
        else
                spin_unlock(&hwlock->lock);
}
EXPORT_SYMBOL_GPL(__hwspin_unlock);
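
The barriers in the lock and unlock paths are what make a simple cross-core publishing protocol safe. A sketch under assumed names (struct shared_msg, its register semantics and the mapping are hypothetical; the hwspin_lock_timeout()/hwspin_unlock() wrappers are assumed from <linux/hwspinlock.h>):

#include <linux/io.h>
#include <linux/hwspinlock.h>

/* hypothetical layout of a message slot in shared memory */
struct shared_msg {
        u32 payload;
        u32 ready;
};

static int publish_msg(struct hwspinlock *hwlock,
                       struct shared_msg __iomem *msg, u32 data)
{
        int ret;

        ret = hwspin_lock_timeout(hwlock, 10);
        if (ret)
                return ret;

        /*
         * The mb() calls inside the lock/unlock paths guarantee the
         * remote core observes these stores only after it takes the
         * hwspinlock itself.
         */
        writel(data, &msg->payload);
        writel(1, &msg->ready);

        hwspin_unlock(hwlock);
        return 0;
}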
/**
 * hwspin_lock_register() - register a new hw spinlock
 * @hwlock: hwspinlock to register.
 *
 * This function should be called from the underlying platform-specific
 * implementation, to register a new hwspinlock instance.
 *
 * Can be called from an atomic context (will not sleep) but not from
 * within interrupt context.
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_register(struct hwspinlock *hwlock)
{
        struct hwspinlock *tmp;
        int ret;

        if (!hwlock || !hwlock->ops ||
                !hwlock->ops->trylock || !hwlock->ops->unlock) {
                pr_err("invalid parameters\n");
                return -EINVAL;
        }

        spin_lock_init(&hwlock->lock);

        spin_lock(&hwspinlock_tree_lock);

        ret = radix_tree_insert(&hwspinlock_tree, hwlock->id, hwlock);
        if (ret)
                goto out;

        /* mark this hwspinlock as available */
        tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock->id,
                                                        HWSPINLOCK_UNUSED);

        /* self-sanity check which should never fail */
        WARN_ON(tmp != hwlock);

out:
        spin_unlock(&hwspinlock_tree_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_register);
/**
 * hwspin_lock_unregister() - unregister an hw spinlock
 * @id: index of the specific hwspinlock to unregister
 *
 * This function should be called from the underlying platform-specific
 * implementation, to unregister an existing (and unused) hwspinlock.
 *
 * Can be called from an atomic context (will not sleep) but not from
 * within interrupt context.
 *
 * Returns the address of hwspinlock @id on success, or NULL on failure
 */
struct hwspinlock *hwspin_lock_unregister(unsigned int id)
{
        struct hwspinlock *hwlock = NULL;
        int ret;

        spin_lock(&hwspinlock_tree_lock);

        /* make sure the hwspinlock is not in use (tag is set) */
        ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
        if (ret == 0) {
                pr_err("hwspinlock %d still in use (or not present)\n", id);
                goto out;
        }

        hwlock = radix_tree_delete(&hwspinlock_tree, id);
        if (!hwlock) {
                pr_err("failed to delete hwspinlock %d\n", id);
                goto out;
        }

out:
        spin_unlock(&hwspinlock_tree_lock);
        return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_unregister);
/**
 * __hwspin_lock_request() - tag an hwspinlock as used and power it up
 * @hwlock: the hwspinlock instance to prepare
 *
 * This is an internal function that prepares an hwspinlock instance
 * before it is given to the user. The function assumes that
 * hwspinlock_tree_lock is taken.
 *
 * Returns 0 or positive to indicate success, and a negative value to
 * indicate an error (with the appropriate error code)
 */
static int __hwspin_lock_request(struct hwspinlock *hwlock)
{
        struct hwspinlock *tmp;
        int ret;

        /* prevent underlying implementation from being removed */
        if (!try_module_get(hwlock->owner)) {
                dev_err(hwlock->dev, "%s: can't get owner\n", __func__);
                return -EINVAL;
        }

        /* notify PM core that power is now needed */
        ret = pm_runtime_get_sync(hwlock->dev);
        if (ret < 0) {
                dev_err(hwlock->dev, "%s: can't power on device\n", __func__);
                /* balance the module reference taken above */
                module_put(hwlock->owner);
                return ret;
        }

        /* mark hwspinlock as used, should not fail */
        tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock->id,
                                                        HWSPINLOCK_UNUSED);

        /* self-sanity check that should never fail */
        WARN_ON(tmp != hwlock);

        return ret;
}
/**
 * hwspin_lock_get_id() - retrieve id number of a given hwspinlock
 * @hwlock: a valid hwspinlock instance
 *
 * Returns the id number of a given @hwlock, or -EINVAL if @hwlock is invalid.
 */
int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
        if (!hwlock) {
                pr_err("invalid hwlock\n");
                return -EINVAL;
        }

        return hwlock->id;
}
EXPORT_SYMBOL_GPL(hwspin_lock_get_id);
/**
 * hwspin_lock_request() - request an hwspinlock
 *
 * This function should be called by users of the hwspinlock device,
 * in order to dynamically assign them an unused hwspinlock.
 * Usually the user of this lock will then have to communicate the lock's id
 * to the remote core before it can be used for synchronization (to get the
 * id of a given hwlock, use hwspin_lock_get_id()).
 *
 * Can be called from an atomic context (will not sleep) but not from
 * within interrupt context (simply because there is no use case for
 * that yet).
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *hwspin_lock_request(void)
{
        struct hwspinlock *hwlock;
        int ret;

        spin_lock(&hwspinlock_tree_lock);

        /* look for an unused lock */
        ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
                                                0, 1, HWSPINLOCK_UNUSED);
        if (ret == 0) {
                pr_warn("a free hwspinlock is not available\n");
                hwlock = NULL;
                goto out;
        }

        /* sanity check that should never fail */
        WARN_ON(ret > 1);

        /* mark as used and power up */
        ret = __hwspin_lock_request(hwlock);
        if (ret < 0)
                hwlock = NULL;

out:
        spin_unlock(&hwspinlock_tree_lock);
        return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request);
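
A sketch of the dynamic-allocation flow described above (illustrative only; send_id_to_remote() is a hypothetical stand-in for whatever IPC channel the platform uses to share the id):

#include <linux/hwspinlock.h>

extern void send_id_to_remote(int id);        /* hypothetical IPC helper */

static struct hwspinlock *setup_dynamic_lock(void)
{
        struct hwspinlock *hwlock;
        int id;

        hwlock = hwspin_lock_request();
        if (!hwlock)
                return NULL;

        /* tell the remote core which lock id to use for this channel */
        id = hwspin_lock_get_id(hwlock);
        send_id_to_remote(id);

        return hwlock;
}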
/**
 * hwspin_lock_request_specific() - request for a specific hwspinlock
 * @id: index of the specific hwspinlock that is requested
 *
 * This function should be called by users of the hwspinlock module,
 * in order to assign them a specific hwspinlock.
 * Usually early board code will be calling this function in order to
 * reserve specific hwspinlock ids for predefined purposes.
 *
 * Can be called from an atomic context (will not sleep) but not from
 * within interrupt context (simply because there is no use case for
 * that yet).
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
        struct hwspinlock *hwlock;
        int ret;

        spin_lock(&hwspinlock_tree_lock);

        /* make sure this hwspinlock exists */
        hwlock = radix_tree_lookup(&hwspinlock_tree, id);
        if (!hwlock) {
                pr_warn("hwspinlock %u does not exist\n", id);
                goto out;
        }

        /* sanity check (this shouldn't happen) */
        WARN_ON(hwlock->id != id);

        /* make sure this hwspinlock is unused */
        ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
        if (ret == 0) {
                pr_warn("hwspinlock %u is already in use\n", id);
                hwlock = NULL;
                goto out;
        }

        /* mark as used and power up */
        ret = __hwspin_lock_request(hwlock);
        if (ret < 0)
                hwlock = NULL;

out:
        spin_unlock(&hwspinlock_tree_lock);
        return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
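
And a sketch of the board-code reservation pattern (the lock id 7 is an arbitrary choice for illustration):

#include <linux/hwspinlock.h>

#define MY_RESERVED_LOCK_ID        7        /* arbitrary, for illustration */

static struct hwspinlock *my_reserved_hwlock;

static int __init my_board_reserve_hwlock(void)
{
        my_reserved_hwlock = hwspin_lock_request_specific(MY_RESERVED_LOCK_ID);
        if (!my_reserved_hwlock)
                return -EBUSY;

        return 0;
}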
/**
 * hwspin_lock_free() - free a specific hwspinlock
 * @hwlock: the specific hwspinlock to free
 *
 * This function marks @hwlock as free again.
 * Should only be called with an @hwlock that was retrieved from
 * an earlier call to hwspin_lock_request{_specific}.
 *
 * Can be called from an atomic context (will not sleep) but not from
 * within interrupt context (simply because there is no use case for
 * that yet).
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_free(struct hwspinlock *hwlock)
{
        struct hwspinlock *tmp;
        int ret;

        if (!hwlock) {
                pr_err("invalid hwlock\n");
                return -EINVAL;
        }

        spin_lock(&hwspinlock_tree_lock);

        /* make sure the hwspinlock is used */
        ret = radix_tree_tag_get(&hwspinlock_tree, hwlock->id,
                                                        HWSPINLOCK_UNUSED);
        if (ret == 1) {
                dev_err(hwlock->dev, "%s: hwlock is already free\n", __func__);
                dump_stack();
                ret = -EINVAL;
                goto out;
        }

        /* notify the underlying device that power is not needed */
        ret = pm_runtime_put(hwlock->dev);
        if (ret < 0)
                goto out;

        /* mark this hwspinlock as available */
        tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock->id,
                                                        HWSPINLOCK_UNUSED);

        /* sanity check (this shouldn't happen) */
        WARN_ON(tmp != hwlock);

        module_put(hwlock->owner);

out:
        spin_unlock(&hwspinlock_tree_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_free);
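
Completing the lifecycle, a matching teardown sketch (illustrative; pairs with the setup_dynamic_lock() example above):

#include <linux/hwspinlock.h>

static void teardown_dynamic_lock(struct hwspinlock *hwlock)
{
        int ret;

        /* drops the PM reference and marks the lock unused again */
        ret = hwspin_lock_free(hwlock);
        if (ret)
                pr_err("hwspin_lock_free failed: %d\n", ret);
}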
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Hardware spinlock interface");
MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");
diff --git a/drivers/hwspinlock/hwspinlock_internal.h b/drivers/hwspinlock/hwspinlock_internal.h
new file mode 100644
index 000000000000..69935e6b93e5
--- /dev/null
+++ b/drivers/hwspinlock/hwspinlock_internal.h
@@ -0,0 +1,61 @@
/*
 * Hardware spinlocks internal header
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef __HWSPINLOCK_HWSPINLOCK_H
#define __HWSPINLOCK_HWSPINLOCK_H

#include <linux/spinlock.h>
#include <linux/device.h>
/**
 * struct hwspinlock_ops - platform-specific hwspinlock handlers
 *
 * @trylock: make a single attempt to take the lock. returns 0 on
 *           failure and true on success. may _not_ sleep.
 * @unlock:  release the lock. always succeeds. may _not_ sleep.
 * @relax:   optional, platform-specific relax handler, called by hwspinlock
 *           core while spinning on a lock, between two successive
 *           invocations of @trylock. may _not_ sleep.
 */
struct hwspinlock_ops {
        int (*trylock)(struct hwspinlock *lock);
        void (*unlock)(struct hwspinlock *lock);
        void (*relax)(struct hwspinlock *lock);
};
/**
 * struct hwspinlock - this struct represents a single hwspinlock instance
 *
 * @dev: underlying device, will be used to invoke runtime PM api
 * @ops: platform-specific hwspinlock handlers
 * @id: a global, unique, system-wide index of the lock
 * @lock: initialized and used by hwspinlock core
 * @owner: underlying implementation module, used to maintain module ref count
 *
 * Note: currently simplicity was opted for, but later we can squeeze some
 * memory bytes by grouping the dev, ops and owner members in a single
 * per-platform struct, and have all hwspinlocks point at it.
 */
struct hwspinlock {
        struct device *dev;
        const struct hwspinlock_ops *ops;
        int id;
        spinlock_t lock;
        struct module *owner;
};

#endif /* __HWSPINLOCK_HWSPINLOCK_H */
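
Tying the two structs together, here is a sketch of what a hypothetical platform implementation might look like: one MMIO register per lock, where reading 0 means the lock was just acquired and writing 0 releases it. All names, the register semantics and the probe wiring are assumptions for illustration, not part of this patch:

#include <linux/io.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hwspinlock.h>
#include "hwspinlock_internal.h"

/* hypothetical per-lock wrapper around the core's struct hwspinlock */
struct my_hwspinlock {
        struct hwspinlock lock;
        void __iomem *addr;        /* MMIO register backing this lock */
};

static int my_hwspinlock_trylock(struct hwspinlock *lock)
{
        struct my_hwspinlock *mylock =
                        container_of(lock, struct my_hwspinlock, lock);

        /* assumed semantics: reading 0 means we now own the lock */
        return readl(mylock->addr) == 0;
}

static void my_hwspinlock_unlock(struct hwspinlock *lock)
{
        struct my_hwspinlock *mylock =
                        container_of(lock, struct my_hwspinlock, lock);

        /* assumed semantics: writing 0 releases the lock */
        writel(0, mylock->addr);
}

static const struct hwspinlock_ops my_hwspinlock_ops = {
        .trylock        = my_hwspinlock_trylock,
        .unlock         = my_hwspinlock_unlock,
        /* .relax is optional and omitted here */
};

/* registration of one lock, e.g. from a platform driver's probe path */
static int my_register_one(struct device *dev, void __iomem *base, int id)
{
        struct my_hwspinlock *mylock;
        int ret;

        mylock = kzalloc(sizeof(*mylock), GFP_KERNEL);
        if (!mylock)
                return -ENOMEM;

        mylock->addr = base + id * sizeof(u32);        /* assumed stride */
        mylock->lock.dev = dev;
        mylock->lock.ops = &my_hwspinlock_ops;
        mylock->lock.id = id;
        mylock->lock.owner = THIS_MODULE;

        ret = hwspin_lock_register(&mylock->lock);
        if (ret)
                kfree(mylock);

        return ret;
}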