author    Ohad Ben-Cohen <ohad@wizery.com>    2011-02-17 12:52:03 -0500
committer Tony Lindgren <tony@atomide.com>    2011-02-17 12:52:03 -0500
commit    bd9a4c7df256cee4e9f6a4b56baa3b89d63f0f1e
tree      ee6ca0aaebd8e553576a0cf3fefafa1cd3ec8e1d
parent    d9e45731debd83e2b249be349993595907dddeae
drivers: hwspinlock: add framework
Add a platform-independent hwspinlock framework.

Hardware spinlock devices are needed, e.g., in order to access data
that is shared between remote processors, that otherwise have no
alternative mechanism to accomplish synchronization and mutual
exclusion operations.

Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
Cc: Hari Kanigeri <h-kanigeri2@ti.com>
Cc: Benoit Cousson <b-cousson@ti.com>
Cc: Kevin Hilman <khilman@ti.com>
Cc: Grant Likely <grant.likely@secretlab.ca>
Cc: Paul Walmsley <paul@pwsan.com>
Cc: Russell King <linux@arm.linux.org.uk>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Tony Lindgren <tony@atomide.com>
-rw-r--r--  Documentation/hwspinlock.txt              | 293
-rw-r--r--  drivers/Kconfig                           |   2
-rw-r--r--  drivers/Makefile                          |   2
-rw-r--r--  drivers/hwspinlock/Kconfig                |  13
-rw-r--r--  drivers/hwspinlock/Makefile               |   5
-rw-r--r--  drivers/hwspinlock/hwspinlock_core.c      | 548
-rw-r--r--  drivers/hwspinlock/hwspinlock_internal.h  |  61
-rw-r--r--  include/linux/hwspinlock.h                | 292
8 files changed, 1216 insertions, 0 deletions
diff --git a/Documentation/hwspinlock.txt b/Documentation/hwspinlock.txt
new file mode 100644
index 000000000000..7dcd1a4e726c
--- /dev/null
+++ b/Documentation/hwspinlock.txt
@@ -0,0 +1,293 @@
1Hardware Spinlock Framework
2
31. Introduction
4
5Hardware spinlock modules provide hardware assistance for synchronization
6and mutual exclusion between heterogeneous processors and those not operating
7under a single, shared operating system.
8
9For example, OMAP4 has dual Cortex-A9, dual Cortex-M3 and a C64x+ DSP,
10each of which is running a different Operating System (the master, A9,
11is usually running Linux and the slave processors, the M3 and the DSP,
12are running some flavor of RTOS).
13
14A generic hwspinlock framework allows platform-independent drivers to use
15the hwspinlock device in order to access data structures that are shared
16between remote processors, which otherwise have no alternative mechanism
17to accomplish synchronization and mutual exclusion operations.
18
19This is necessary, for example, for inter-processor communication:
20on OMAP4, cpu-intensive multimedia tasks are offloaded by the host to the
21remote M3 and/or C64x+ slave processors (by an IPC subsystem called Syslink).
22
23To achieve fast message-based communication, minimal kernel support
24is needed to deliver messages arriving from a remote processor to the
25appropriate user process.
26
27This communication is based on simple data structures that are shared between
28the remote processors, and access to them is synchronized using the hwspinlock
29module (the remote processor directly places new messages in this shared data
30structure).
31
32A common hwspinlock interface makes it possible to have generic,
33platform-independent drivers.
34
352. User API
36
37 struct hwspinlock *hwspin_lock_request(void);
38 - dynamically assign an hwspinlock and return its address, or NULL
39 in case an unused hwspinlock isn't available. Users of this
40 API will usually want to communicate the lock's id to the remote core
41 before it can be used to achieve synchronization.
42 Can be called from an atomic context (this function will not sleep) but
43 not from within interrupt context.
44
45 struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
46 - assign a specific hwspinlock id and return its address, or NULL
47 if that hwspinlock is already in use. Usually board code will
48 be calling this function in order to reserve specific hwspinlock
49 ids for predefined purposes.
50 Can be called from an atomic context (this function will not sleep) but
51 not from within interrupt context.
52
53 int hwspin_lock_free(struct hwspinlock *hwlock);
54 - free a previously-assigned hwspinlock; returns 0 on success, or an
55 appropriate error code on failure (e.g. -EINVAL if the hwspinlock
56 is already free).
57 Can be called from an atomic context (this function will not sleep) but
58 not from within interrupt context.
59
60 int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int timeout);
61 - lock a previously-assigned hwspinlock with a timeout limit (specified in
62 msecs). If the hwspinlock is already taken, the function will busy loop
63 waiting for it to be released, but give up when the timeout elapses.
64 Upon a successful return from this function, preemption is disabled so
65 the caller must not sleep, and is advised to release the hwspinlock as
66 soon as possible, in order to minimize remote cores polling on the
67 hardware interconnect.
68 Returns 0 when successful and an appropriate error code otherwise (most
69 notably -ETIMEDOUT if the hwspinlock is still busy after timeout msecs).
70 The function will never sleep.
71
72 int hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int timeout);
73 - lock a previously-assigned hwspinlock with a timeout limit (specified in
74 msecs). If the hwspinlock is already taken, the function will busy loop
75 waiting for it to be released, but give up when the timeout elapses.
76 Upon a successful return from this function, preemption and the local
77 interrupts are disabled, so the caller must not sleep, and is advised to
78 release the hwspinlock as soon as possible.
79 Returns 0 when successful and an appropriate error code otherwise (most
80 notably -ETIMEDOUT if the hwspinlock is still busy after timeout msecs).
81 The function will never sleep.
82
83 int hwspin_lock_timeout_irqsave(struct hwspinlock *hwlock, unsigned int to,
84 unsigned long *flags);
85 - lock a previously-assigned hwspinlock with a timeout limit (specified in
86 msecs). If the hwspinlock is already taken, the function will busy loop
87 waiting for it to be released, but give up when the timeout elapses.
88 Upon a successful return from this function, preemption is disabled,
89 local interrupts are disabled and their previous state is saved at the
90 given flags placeholder. The caller must not sleep, and is advised to
91 release the hwspinlock as soon as possible.
92 Returns 0 when successful and an appropriate error code otherwise (most
93 notably -ETIMEDOUT if the hwspinlock is still busy after timeout msecs).
94 The function will never sleep.
95
96 int hwspin_trylock(struct hwspinlock *hwlock);
97 - attempt to lock a previously-assigned hwspinlock, but immediately fail if
98 it is already taken.
99 Upon a successful return from this function, preemption is disabled so
100 caller must not sleep, and is advised to release the hwspinlock as soon as
101 possible, in order to minimize remote cores polling on the hardware
102 interconnect.
103 Returns 0 on success and an appropriate error code otherwise (most
104 notably -EBUSY if the hwspinlock was already taken).
105 The function will never sleep.
106
107 int hwspin_trylock_irq(struct hwspinlock *hwlock);
108 - attempt to lock a previously-assigned hwspinlock, but immediately fail if
109 it is already taken.
110 Upon a successful return from this function, preemption and the local
111 interrupts are disabled so caller must not sleep, and is advised to
112 release the hwspinlock as soon as possible.
113 Returns 0 on success and an appropriate error code otherwise (most
114 notably -EBUSY if the hwspinlock was already taken).
115 The function will never sleep.
116
117 int hwspin_trylock_irqsave(struct hwspinlock *hwlock, unsigned long *flags);
118 - attempt to lock a previously-assigned hwspinlock, but immediately fail if
119 it is already taken.
120 Upon a successful return from this function, preemption is disabled,
121 the local interrupts are disabled and their previous state is saved
122 at the given flags placeholder. The caller must not sleep, and is advised
123 to release the hwspinlock as soon as possible.
124 Returns 0 on success and an appropriate error code otherwise (most
125 notably -EBUSY if the hwspinlock was already taken).
126 The function will never sleep.
127
128 void hwspin_unlock(struct hwspinlock *hwlock);
129 - unlock a previously-locked hwspinlock. Always succeeds, and can be called
130 from any context (the function never sleeps). Note: code should _never_
131 unlock an hwspinlock which is already unlocked (there is no protection
132 against this).
133
134 void hwspin_unlock_irq(struct hwspinlock *hwlock);
135 - unlock a previously-locked hwspinlock and enable local interrupts.
136 The caller should _never_ unlock an hwspinlock which is already unlocked.
137 Doing so is considered a bug (there is no protection against this).
138 Upon a successful return from this function, preemption and local
139 interrupts are enabled. This function will never sleep.
140
141 void
142 hwspin_unlock_irqrestore(struct hwspinlock *hwlock, unsigned long *flags);
143 - unlock a previously-locked hwspinlock.
144 The caller should _never_ unlock an hwspinlock which is already unlocked.
145 Doing so is considered a bug (there is no protection against this).
146 Upon a successful return from this function, preemption is reenabled,
147 and the state of the local interrupts is restored to the state saved at
148 the given flags. This function will never sleep.
149
150 int hwspin_lock_get_id(struct hwspinlock *hwlock);
151 - retrieve id number of a given hwspinlock. This is needed when an
152 hwspinlock is dynamically assigned: before it can be used to achieve
153 mutual exclusion with a remote cpu, the id number should be communicated
154 to the remote task with which we want to synchronize.
155 Returns the hwspinlock id number, or -EINVAL if hwlock is null.
156
1573. Typical usage
158
159#include <linux/hwspinlock.h>
160#include <linux/err.h>
161
162int hwspinlock_example1(void)
163{
164 struct hwspinlock *hwlock;
165 int ret, id;
166
167 /* dynamically assign a hwspinlock */
168 hwlock = hwspin_lock_request();
169 if (!hwlock)
170 ...
171
172 id = hwspin_lock_get_id(hwlock);
173 /* probably need to communicate id to a remote processor now */
174
175 /* take the lock, spin for 1 sec if it's already taken */
176 ret = hwspin_lock_timeout(hwlock, 1000);
177 if (ret)
178 ...
179
180 /*
181 * we took the lock, do our thing now, but do NOT sleep
182 */
183
184 /* release the lock */
185 hwspin_unlock(hwlock);
186
187 /* free the lock */
188 ret = hwspin_lock_free(hwlock);
189 if (ret)
190 ...
191
192 return ret;
193}
194
195int hwspinlock_example2(void)
196{
197 struct hwspinlock *hwlock;
198 int ret;
199
200 /*
201 * assign a specific hwspinlock id - this should be called early
202 * by board init code.
203 */
204 hwlock = hwspin_lock_request_specific(PREDEFINED_LOCK_ID);
205 if (!hwlock)
206 ...
207
208 /* try to take it, but don't spin on it */
209 ret = hwspin_trylock(hwlock);
210 if (ret) {
211 pr_info("lock is already taken\n");
212 return -EBUSY;
213 }
214
215 /*
216 * we took the lock, do our thing now, but do NOT sleep
217 */
218
219 /* release the lock */
220 hwspin_unlock(hwlock);
221
222 /* free the lock */
223 ret = hwspin_lock_free(hwlock);
224 if (ret)
225 ...
226
227 return ret;
228}
229
230
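The two examples above use the plain lock/unlock flavors. The following is a
minimal sketch of the same pattern with the irqsave variants from section 2,
pairing hwspin_lock_timeout_irqsave() with hwspin_unlock_irqrestore(); the
function name and the error handling are illustrative only:

#include <linux/hwspinlock.h>
#include <linux/err.h>

int hwspinlock_example3(void)
{
	struct hwspinlock *hwlock;
	unsigned long flags;
	int ret;

	/* dynamically assign an unused hwspinlock */
	hwlock = hwspin_lock_request();
	if (!hwlock)
		return -EBUSY;

	/*
	 * take the lock, spin for up to 10 msecs if it's already taken.
	 * On success, preemption and local interrupts are disabled, and
	 * the previous interrupt state is saved in 'flags'.
	 */
	ret = hwspin_lock_timeout_irqsave(hwlock, 10, &flags);
	if (ret)
		goto out_free;

	/* we own the lock: touch the shared data, but do NOT sleep */

	/* release the lock and restore the saved interrupt state */
	hwspin_unlock_irqrestore(hwlock, &flags);

out_free:
	hwspin_lock_free(hwlock);
	return ret;
}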
2314. API for implementors
232
233 int hwspin_lock_register(struct hwspinlock *hwlock);
234 - to be called from the underlying platform-specific implementation, in
235 order to register a new hwspinlock instance. Can be called from an atomic
236 context (this function will not sleep) but not from within interrupt
237 context. Returns 0 on success, or appropriate error code on failure.
238
239 struct hwspinlock *hwspin_lock_unregister(unsigned int id);
240 - to be called from the underlying vendor-specific implementation, in order
241 to unregister an existing (and unused) hwspinlock instance.
242 Can be called from an atomic context (will not sleep) but not from
243 within interrupt context.
244 Returns the address of the hwspinlock on success, or NULL on error (e.g.
245 if the hwspinlock is still in use).
246
2475. struct hwspinlock
248
249This struct represents an hwspinlock instance. It is registered by the
250underlying hwspinlock implementation using the hwspin_lock_register() API.
251
252/**
253 * struct hwspinlock - vendor-specific hwspinlock implementation
254 *
255 * @dev: underlying device, will be used with runtime PM api
256 * @ops: vendor-specific hwspinlock handlers
257 * @id: a global, unique, system-wide index of the lock.
258 * @lock: initialized and used by hwspinlock core
259 * @owner: underlying implementation module, used to maintain module ref count
260 */
261struct hwspinlock {
262 struct device *dev;
263 const struct hwspinlock_ops *ops;
264 int id;
265 spinlock_t lock;
266 struct module *owner;
267};
268
269The underlying implementation is responsible for assigning the dev, ops, id and
270owner members. The lock member, on the other hand, is initialized and used by
271the hwspinlock core.
272
2736. Implementation callbacks
274
275There are three possible callbacks defined in 'struct hwspinlock_ops':
276
277struct hwspinlock_ops {
278 int (*trylock)(struct hwspinlock *lock);
279 void (*unlock)(struct hwspinlock *lock);
280 void (*relax)(struct hwspinlock *lock);
281};
282
283The first two callbacks are mandatory:
284
285The ->trylock() callback should make a single attempt to take the lock, and
286return 0 on failure and 1 on success. This callback may _not_ sleep.
287
288The ->unlock() callback releases the lock. It always succeeds, and it, too,
289may _not_ sleep.
290
291The ->relax() callback is optional. It is called by hwspinlock core while
292spinning on a lock, and can be used by the underlying implementation to force
293a delay between two successive invocations of ->trylock(). It may _not_ sleep.
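To tie sections 4-6 together, here is a minimal, hypothetical vendor-driver
sketch. The example_* names, the one-32-bit-register-per-lock layout and its
read/write semantics are invented purely for illustration; only the
hwspinlock_ops callbacks, the struct hwspinlock fields and the
hwspin_lock_register()/hwspin_lock_unregister() calls come from the framework
itself:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/hwspinlock.h>

#include "hwspinlock_internal.h"

/* invented layout: one 32-bit lock register per lock; reading 1 grants it */
struct example_hwspinlock {
	struct hwspinlock lock;
	void __iomem *addr;
};

static int example_hwspinlock_trylock(struct hwspinlock *lock)
{
	struct example_hwspinlock *e =
		container_of(lock, struct example_hwspinlock, lock);

	/* ->trylock() must return 1 on success and 0 on failure */
	return readl(e->addr) == 1;
}

static void example_hwspinlock_unlock(struct hwspinlock *lock)
{
	struct example_hwspinlock *e =
		container_of(lock, struct example_hwspinlock, lock);

	/* writing 0 releases the lock for the other cores */
	writel(0, e->addr);
}

static void example_hwspinlock_relax(struct hwspinlock *lock)
{
	/* optional: back off a little between two ->trylock() attempts */
	ndelay(50);
}

static const struct hwspinlock_ops example_hwspinlock_ops = {
	.trylock	= example_hwspinlock_trylock,
	.unlock		= example_hwspinlock_unlock,
	.relax		= example_hwspinlock_relax,
};

/* called from the vendor driver's probe path, once per lock */
static int example_register_one(struct device *dev, void __iomem *addr, int id)
{
	struct example_hwspinlock *e;
	int ret;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->addr = addr;
	e->lock.dev = dev;
	e->lock.owner = THIS_MODULE;
	e->lock.id = id;
	e->lock.ops = &example_hwspinlock_ops;

	/* the core initializes e->lock.lock and tags the lock as unused */
	ret = hwspin_lock_register(&e->lock);
	if (ret)
		kfree(e);
	return ret;
}

/* called from the vendor driver's remove path, once per lock */
static void example_unregister_one(int id)
{
	struct hwspinlock *hwlock = hwspin_lock_unregister(id);

	if (hwlock)
		kfree(container_of(hwlock, struct example_hwspinlock, lock));
}

A real driver would typically ioremap its lock bank once, call something like
example_register_one() for each lock it exposes, and leave it to board code to
reserve any locks with fixed roles via hwspin_lock_request_specific().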
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 9bfb71ff3a6a..177c7d156933 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -117,4 +117,6 @@ source "drivers/staging/Kconfig"
117source "drivers/platform/Kconfig"
118
119source "drivers/clk/Kconfig"
120
121source "drivers/hwspinlock/Kconfig"
122endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index b423bb16c3a8..3f135b6fb014 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -117,3 +117,5 @@ obj-y += platform/
117obj-y += ieee802154/
118#common clk code
119obj-y += clk/
120
121obj-$(CONFIG_HWSPINLOCK) += hwspinlock/
diff --git a/drivers/hwspinlock/Kconfig b/drivers/hwspinlock/Kconfig
new file mode 100644
index 000000000000..9dd8db46606b
--- /dev/null
+++ b/drivers/hwspinlock/Kconfig
@@ -0,0 +1,13 @@
1#
2# Generic HWSPINLOCK framework
3#
4
5config HWSPINLOCK
6 tristate "Generic Hardware Spinlock framework"
7 help
8 Say Y here to support the generic hardware spinlock framework.
9 You only need to enable this if you have a hardware spinlock module
10 on your system (usually only relevant if your system has remote slave
11 coprocessors).
12
13 If unsure, say N.
diff --git a/drivers/hwspinlock/Makefile b/drivers/hwspinlock/Makefile
new file mode 100644
index 000000000000..b9d2b9f40491
--- /dev/null
+++ b/drivers/hwspinlock/Makefile
@@ -0,0 +1,5 @@
1#
2# Generic Hardware Spinlock framework
3#
4
5obj-$(CONFIG_HWSPINLOCK) += hwspinlock_core.o
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
new file mode 100644
index 000000000000..43a62714b4fb
--- /dev/null
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -0,0 +1,548 @@
1/*
2 * Hardware spinlock framework
3 *
4 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
5 *
6 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License version 2 as published
10 * by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
18#define pr_fmt(fmt) "%s: " fmt, __func__
19
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/spinlock.h>
23#include <linux/types.h>
24#include <linux/err.h>
25#include <linux/jiffies.h>
26#include <linux/radix-tree.h>
27#include <linux/hwspinlock.h>
28#include <linux/pm_runtime.h>
29
30#include "hwspinlock_internal.h"
31
32/* radix tree tags */
33#define HWSPINLOCK_UNUSED (0) /* tags an hwspinlock as unused */
34
35/*
36 * A radix tree is used to maintain the available hwspinlock instances.
37 * The tree associates hwspinlock pointers with their integer key id,
38 * and provides easy-to-use API which makes the hwspinlock core code simple
39 * and easy to read.
40 *
41 * Radix trees are quick on lookups, and reasonably efficient in terms of
42 * storage, especially with the high-density usage this framework
43 * requires (a continuous range of integer keys, beginning with zero, is
44 * used as the IDs of the hwspinlock instances).
45 *
46 * The radix tree API supports tagging items in the tree, which this
47 * framework uses to mark unused hwspinlock instances (see the
48 * HWSPINLOCK_UNUSED tag above). As a result, the process of querying the
49 * tree, looking for an unused hwspinlock instance, is now reduced to a
50 * single radix tree API call.
51 */
52static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);
53
54/*
55 * Synchronization of access to the tree is achieved using this spinlock,
56 * as the radix-tree API requires that users provide all synchronisation.
57 */
58static DEFINE_SPINLOCK(hwspinlock_tree_lock);
59
60/**
61 * __hwspin_trylock() - attempt to lock a specific hwspinlock
62 * @hwlock: an hwspinlock which we want to trylock
63 * @mode: controls whether local interrupts are disabled or not
64 * @flags: a pointer where the caller's interrupt state will be saved at (if
65 * requested)
66 *
67 * This function attempts to lock an hwspinlock, and will immediately
68 * fail if the hwspinlock is already taken.
69 *
70 * Upon a successful return from this function, preemption (and possibly
71 * interrupts) is disabled, so the caller must not sleep, and is advised to
72 * release the hwspinlock as soon as possible. This is required in order to
73 * minimize remote cores polling on the hardware interconnect.
74 *
75 * The user decides whether local interrupts are disabled or not, and if yes,
76 * whether he wants their previous state to be saved. It is up to the user
77 * to choose the appropriate @mode of operation, exactly the same way users
78 * should decide between spin_trylock, spin_trylock_irq and
79 * spin_trylock_irqsave.
80 *
81 * Returns 0 if we successfully locked the hwspinlock or -EBUSY if
82 * the hwspinlock was already taken.
83 * This function will never sleep.
84 */
85int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
86{
87 int ret;
88
89 BUG_ON(!hwlock);
90 BUG_ON(!flags && mode == HWLOCK_IRQSTATE);
91
92 /*
93 * This spin_lock{_irq, _irqsave} serves three purposes:
94 *
95 * 1. Disable preemption, in order to minimize the period of time
96 * in which the hwspinlock is taken. This is important in order
97 * to minimize the possible polling on the hardware interconnect
98 * by a remote user of this lock.
99 * 2. Make the hwspinlock SMP-safe (so we can take it from
100 * additional contexts on the local host).
101 * 3. Ensure that in_atomic/might_sleep checks catch potential
102 * problems with hwspinlock usage (e.g. scheduler checks like
103 * 'scheduling while atomic' etc.)
104 */
105 if (mode == HWLOCK_IRQSTATE)
106 ret = spin_trylock_irqsave(&hwlock->lock, *flags);
107 else if (mode == HWLOCK_IRQ)
108 ret = spin_trylock_irq(&hwlock->lock);
109 else
110 ret = spin_trylock(&hwlock->lock);
111
112 /* is lock already taken by another context on the local cpu ? */
113 if (!ret)
114 return -EBUSY;
115
116 /* try to take the hwspinlock device */
117 ret = hwlock->ops->trylock(hwlock);
118
119 /* if hwlock is already taken, undo spin_trylock_* and exit */
120 if (!ret) {
121 if (mode == HWLOCK_IRQSTATE)
122 spin_unlock_irqrestore(&hwlock->lock, *flags);
123 else if (mode == HWLOCK_IRQ)
124 spin_unlock_irq(&hwlock->lock);
125 else
126 spin_unlock(&hwlock->lock);
127
128 return -EBUSY;
129 }
130
131 /*
132 * We can be sure the other core's memory operations
133 * are observable to us only _after_ we successfully take
134 * the hwspinlock, and we must make sure that subsequent memory
135 * operations (both reads and writes) will not be reordered before
136 * we actually took the hwspinlock.
137 *
138 * Note: the implicit memory barrier of the spinlock above is too
139 * early, so we need this additional explicit memory barrier.
140 */
141 mb();
142
143 return 0;
144}
145EXPORT_SYMBOL_GPL(__hwspin_trylock);
146
147/**
148 * __hwspin_lock_timeout() - lock an hwspinlock with timeout limit
149 * @hwlock: the hwspinlock to be locked
150 * @timeout: timeout value in msecs
151 * @mode: mode which controls whether local interrupts are disabled or not
152 * @flags: a pointer to where the caller's interrupt state will be saved at (if
153 * requested)
154 *
155 * This function locks the given @hwlock. If the @hwlock
156 * is already taken, the function will busy loop waiting for it to
157 * be released, but give up after @timeout msecs have elapsed.
158 *
159 * Upon a successful return from this function, preemption is disabled
160 * (and possibly local interrupts, too), so the caller must not sleep,
161 * and is advised to release the hwspinlock as soon as possible.
162 * This is required in order to minimize remote cores polling on the
163 * hardware interconnect.
164 *
165 * The user decides whether local interrupts are disabled or not, and if yes,
166 * whether he wants their previous state to be saved. It is up to the user
167 * to choose the appropriate @mode of operation, exactly the same way users
168 * should decide between spin_lock, spin_lock_irq and spin_lock_irqsave.
169 *
170 * Returns 0 when the @hwlock was successfully taken, and an appropriate
171 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
172 * busy after @timeout msecs). The function will never sleep.
173 */
174int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
175 int mode, unsigned long *flags)
176{
177 int ret;
178 unsigned long expire;
179
180 expire = msecs_to_jiffies(to) + jiffies;
181
182 for (;;) {
183 /* Try to take the hwspinlock */
184 ret = __hwspin_trylock(hwlock, mode, flags);
185 if (ret != -EBUSY)
186 break;
187
188 /*
189 * The lock is already taken, let's check if the user wants
190 * us to try again
191 */
192 if (time_is_before_eq_jiffies(expire))
193 return -ETIMEDOUT;
194
195 /*
196 * Allow platform-specific relax handlers to prevent
197 * hogging the interconnect (no sleeping, though)
198 */
199 if (hwlock->ops->relax)
200 hwlock->ops->relax(hwlock);
201 }
202
203 return ret;
204}
205EXPORT_SYMBOL_GPL(__hwspin_lock_timeout);
206
207/**
208 * __hwspin_unlock() - unlock a specific hwspinlock
209 * @hwlock: a previously-acquired hwspinlock which we want to unlock
210 * @mode: controls whether local interrupts needs to be restored or not
211 * @flags: previous caller's interrupt state to restore (if requested)
212 *
213 * This function will unlock a specific hwspinlock, enable preemption and
214 * (possibly) enable interrupts or restore their previous state.
215 * @hwlock must be already locked before calling this function: it is a bug
216 * to call unlock on a @hwlock that is already unlocked.
217 *
218 * The user decides whether local interrupts should be enabled or not, and
219 * if yes, whether he wants their previous state to be restored. It is up
220 * to the user to choose the appropriate @mode of operation, exactly the
221 * same way users decide between spin_unlock, spin_unlock_irq and
222 * spin_unlock_irqrestore.
223 *
224 * The function will never sleep.
225 */
226void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
227{
228 BUG_ON(!hwlock);
229 BUG_ON(!flags && mode == HWLOCK_IRQSTATE);
230
231 /*
232 * We must make sure that memory operations (both reads and writes),
233 * done before unlocking the hwspinlock, will not be reordered
234 * after the lock is released.
235 *
236 * That's the purpose of this explicit memory barrier.
237 *
238 * Note: the memory barrier induced by the spin_unlock below is too
239 * late; the other core is going to access memory soon after it will
240 * take the hwspinlock, and by then we want to be sure our memory
241 * operations are already observable.
242 */
243 mb();
244
245 hwlock->ops->unlock(hwlock);
246
247 /* Undo the spin_trylock{_irq, _irqsave} called while locking */
248 if (mode == HWLOCK_IRQSTATE)
249 spin_unlock_irqrestore(&hwlock->lock, *flags);
250 else if (mode == HWLOCK_IRQ)
251 spin_unlock_irq(&hwlock->lock);
252 else
253 spin_unlock(&hwlock->lock);
254}
255EXPORT_SYMBOL_GPL(__hwspin_unlock);
256
257/**
258 * hwspin_lock_register() - register a new hw spinlock
259 * @hwlock: hwspinlock to register.
260 *
261 * This function should be called from the underlying platform-specific
262 * implementation, to register a new hwspinlock instance.
263 *
264 * Can be called from an atomic context (will not sleep) but not from
265 * within interrupt context.
266 *
267 * Returns 0 on success, or an appropriate error code on failure
268 */
269int hwspin_lock_register(struct hwspinlock *hwlock)
270{
271 struct hwspinlock *tmp;
272 int ret;
273
274 if (!hwlock || !hwlock->ops ||
275 !hwlock->ops->trylock || !hwlock->ops->unlock) {
276 pr_err("invalid parameters\n");
277 return -EINVAL;
278 }
279
280 spin_lock_init(&hwlock->lock);
281
282 spin_lock(&hwspinlock_tree_lock);
283
284 ret = radix_tree_insert(&hwspinlock_tree, hwlock->id, hwlock);
285 if (ret)
286 goto out;
287
288 /* mark this hwspinlock as available */
289 tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock->id,
290 HWSPINLOCK_UNUSED);
291
292 /* self-sanity check which should never fail */
293 WARN_ON(tmp != hwlock);
294
295out:
296 spin_unlock(&hwspinlock_tree_lock);
297 return ret;
298}
299EXPORT_SYMBOL_GPL(hwspin_lock_register);
300
301/**
302 * hwspin_lock_unregister() - unregister an hw spinlock
303 * @id: index of the specific hwspinlock to unregister
304 *
305 * This function should be called from the underlying platform-specific
306 * implementation, to unregister an existing (and unused) hwspinlock.
307 *
308 * Can be called from an atomic context (will not sleep) but not from
309 * within interrupt context.
310 *
311 * Returns the address of hwspinlock @id on success, or NULL on failure
312 */
313struct hwspinlock *hwspin_lock_unregister(unsigned int id)
314{
315 struct hwspinlock *hwlock = NULL;
316 int ret;
317
318 spin_lock(&hwspinlock_tree_lock);
319
320 /* make sure the hwspinlock is not in use (tag is set) */
321 ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
322 if (ret == 0) {
323 pr_err("hwspinlock %d still in use (or not present)\n", id);
324 goto out;
325 }
326
327 hwlock = radix_tree_delete(&hwspinlock_tree, id);
328 if (!hwlock) {
329 pr_err("failed to delete hwspinlock %d\n", id);
330 goto out;
331 }
332
333out:
334 spin_unlock(&hwspinlock_tree_lock);
335 return hwlock;
336}
337EXPORT_SYMBOL_GPL(hwspin_lock_unregister);
338
339/**
340 * __hwspin_lock_request() - tag an hwspinlock as used and power it up
341 *
342 * This is an internal function that prepares an hwspinlock instance
343 * before it is given to the user. The function assumes that
344 * hwspinlock_tree_lock is taken.
345 *
346 * Returns 0 or positive to indicate success, and a negative value to
347 * indicate an error (with the appropriate error code)
348 */
349static int __hwspin_lock_request(struct hwspinlock *hwlock)
350{
351 struct hwspinlock *tmp;
352 int ret;
353
354 /* prevent underlying implementation from being removed */
355 if (!try_module_get(hwlock->owner)) {
356 dev_err(hwlock->dev, "%s: can't get owner\n", __func__);
357 return -EINVAL;
358 }
359
360 /* notify PM core that power is now needed */
361 ret = pm_runtime_get_sync(hwlock->dev);
362 if (ret < 0) {
363 dev_err(hwlock->dev, "%s: can't power on device\n", __func__);
364 return ret;
365 }
366
367 /* mark hwspinlock as used, should not fail */
368 tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock->id,
369 HWSPINLOCK_UNUSED);
370
371 /* self-sanity check that should never fail */
372 WARN_ON(tmp != hwlock);
373
374 return ret;
375}
376
377/**
378 * hwspin_lock_get_id() - retrieve id number of a given hwspinlock
379 * @hwlock: a valid hwspinlock instance
380 *
381 * Returns the id number of a given @hwlock, or -EINVAL if @hwlock is invalid.
382 */
383int hwspin_lock_get_id(struct hwspinlock *hwlock)
384{
385 if (!hwlock) {
386 pr_err("invalid hwlock\n");
387 return -EINVAL;
388 }
389
390 return hwlock->id;
391}
392EXPORT_SYMBOL_GPL(hwspin_lock_get_id);
393
394/**
395 * hwspin_lock_request() - request an hwspinlock
396 *
397 * This function should be called by users of the hwspinlock device,
398 * in order to dynamically assign them an unused hwspinlock.
399 * Usually the user of this lock will then have to communicate the lock's id
400 * to the remote core before it can be used for synchronization (to get the
401 * id of a given hwlock, use hwspin_lock_get_id()).
402 *
403 * Can be called from an atomic context (will not sleep) but not from
404 * within interrupt context (simply because there is no use case for
405 * that yet).
406 *
407 * Returns the address of the assigned hwspinlock, or NULL on error
408 */
409struct hwspinlock *hwspin_lock_request(void)
410{
411 struct hwspinlock *hwlock;
412 int ret;
413
414 spin_lock(&hwspinlock_tree_lock);
415
416 /* look for an unused lock */
417 ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
418 0, 1, HWSPINLOCK_UNUSED);
419 if (ret == 0) {
420 pr_warn("a free hwspinlock is not available\n");
421 hwlock = NULL;
422 goto out;
423 }
424
425 /* sanity check that should never fail */
426 WARN_ON(ret > 1);
427
428 /* mark as used and power up */
429 ret = __hwspin_lock_request(hwlock);
430 if (ret < 0)
431 hwlock = NULL;
432
433out:
434 spin_unlock(&hwspinlock_tree_lock);
435 return hwlock;
436}
437EXPORT_SYMBOL_GPL(hwspin_lock_request);
438
439/**
440 * hwspin_lock_request_specific() - request for a specific hwspinlock
441 * @id: index of the specific hwspinlock that is requested
442 *
443 * This function should be called by users of the hwspinlock module,
444 * in order to assign them a specific hwspinlock.
445 * Usually early board code will be calling this function in order to
446 * reserve specific hwspinlock ids for predefined purposes.
447 *
448 * Can be called from an atomic context (will not sleep) but not from
449 * within interrupt context (simply because there is no use case for
450 * that yet).
451 *
452 * Returns the address of the assigned hwspinlock, or NULL on error
453 */
454struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
455{
456 struct hwspinlock *hwlock;
457 int ret;
458
459 spin_lock(&hwspinlock_tree_lock);
460
461 /* make sure this hwspinlock exists */
462 hwlock = radix_tree_lookup(&hwspinlock_tree, id);
463 if (!hwlock) {
464 pr_warn("hwspinlock %u does not exist\n", id);
465 goto out;
466 }
467
468 /* sanity check (this shouldn't happen) */
469 WARN_ON(hwlock->id != id);
470
471 /* make sure this hwspinlock is unused */
472 ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
473 if (ret == 0) {
474 pr_warn("hwspinlock %u is already in use\n", id);
475 hwlock = NULL;
476 goto out;
477 }
478
479 /* mark as used and power up */
480 ret = __hwspin_lock_request(hwlock);
481 if (ret < 0)
482 hwlock = NULL;
483
484out:
485 spin_unlock(&hwspinlock_tree_lock);
486 return hwlock;
487}
488EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
489
490/**
491 * hwspin_lock_free() - free a specific hwspinlock
492 * @hwlock: the specific hwspinlock to free
493 *
494 * This function marks @hwlock as free again.
495 * Should only be called with an @hwlock that was retrieved from
496 * an earlier call to hwspin_lock_request{_specific}.
497 *
498 * Can be called from an atomic context (will not sleep) but not from
499 * within interrupt context (simply because there is no use case for
500 * that yet).
501 *
502 * Returns 0 on success, or an appropriate error code on failure
503 */
504int hwspin_lock_free(struct hwspinlock *hwlock)
505{
506 struct hwspinlock *tmp;
507 int ret;
508
509 if (!hwlock) {
510 pr_err("invalid hwlock\n");
511 return -EINVAL;
512 }
513
514 spin_lock(&hwspinlock_tree_lock);
515
516 /* make sure the hwspinlock is used */
517 ret = radix_tree_tag_get(&hwspinlock_tree, hwlock->id,
518 HWSPINLOCK_UNUSED);
519 if (ret == 1) {
520 dev_err(hwlock->dev, "%s: hwlock is already free\n", __func__);
521 dump_stack();
522 ret = -EINVAL;
523 goto out;
524 }
525
526 /* notify the underlying device that power is not needed */
527 ret = pm_runtime_put(hwlock->dev);
528 if (ret < 0)
529 goto out;
530
531 /* mark this hwspinlock as available */
532 tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock->id,
533 HWSPINLOCK_UNUSED);
534
535 /* sanity check (this shouldn't happen) */
536 WARN_ON(tmp != hwlock);
537
538 module_put(hwlock->owner);
539
540out:
541 spin_unlock(&hwspinlock_tree_lock);
542 return ret;
543}
544EXPORT_SYMBOL_GPL(hwspin_lock_free);
545
546MODULE_LICENSE("GPL v2");
547MODULE_DESCRIPTION("Hardware spinlock interface");
548MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");
diff --git a/drivers/hwspinlock/hwspinlock_internal.h b/drivers/hwspinlock/hwspinlock_internal.h
new file mode 100644
index 000000000000..69935e6b93e5
--- /dev/null
+++ b/drivers/hwspinlock/hwspinlock_internal.h
@@ -0,0 +1,61 @@
1/*
2 * Hardware spinlocks internal header
3 *
4 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
5 *
6 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License version 2 as published
10 * by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
18#ifndef __HWSPINLOCK_HWSPINLOCK_H
19#define __HWSPINLOCK_HWSPINLOCK_H
20
21#include <linux/spinlock.h>
22#include <linux/device.h>
23
24/**
25 * struct hwspinlock_ops - platform-specific hwspinlock handlers
26 *
27 * @trylock: make a single attempt to take the lock. returns 0 on
28 * failure and 1 on success. may _not_ sleep.
29 * @unlock: release the lock. always succeed. may _not_ sleep.
30 * @relax: optional, platform-specific relax handler, called by hwspinlock
31 * core while spinning on a lock, between two successive
32 * invocations of @trylock. may _not_ sleep.
33 */
34struct hwspinlock_ops {
35 int (*trylock)(struct hwspinlock *lock);
36 void (*unlock)(struct hwspinlock *lock);
37 void (*relax)(struct hwspinlock *lock);
38};
39
40/**
41 * struct hwspinlock - this struct represents a single hwspinlock instance
42 *
43 * @dev: underlying device, will be used to invoke runtime PM api
44 * @ops: platform-specific hwspinlock handlers
45 * @id: a global, unique, system-wide index of the lock.
46 * @lock: initialized and used by hwspinlock core
47 * @owner: underlying implementation module, used to maintain module ref count
48 *
49 * Note: currently simplicity was opted for, but later we can squeeze some
50 * memory bytes by grouping the dev, ops and owner members in a single
51 * per-platform struct, and have all hwspinlocks point at it.
52 */
53struct hwspinlock {
54 struct device *dev;
55 const struct hwspinlock_ops *ops;
56 int id;
57 spinlock_t lock;
58 struct module *owner;
59};
60
61#endif /* __HWSPINLOCK_HWSPINLOCK_H */
diff --git a/include/linux/hwspinlock.h b/include/linux/hwspinlock.h
new file mode 100644
index 000000000000..8390efc457eb
--- /dev/null
+++ b/include/linux/hwspinlock.h
@@ -0,0 +1,292 @@
1/*
2 * Hardware spinlock public header
3 *
4 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
5 *
6 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License version 2 as published
10 * by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
18#ifndef __LINUX_HWSPINLOCK_H
19#define __LINUX_HWSPINLOCK_H
20
21#include <linux/err.h>
22#include <linux/sched.h>
23
24/* hwspinlock mode argument */
25#define HWLOCK_IRQSTATE 0x01 /* Disable interrupts, save state */
26#define HWLOCK_IRQ 0x02 /* Disable interrupts, don't save state */
27
28struct hwspinlock;
29
30#if defined(CONFIG_HWSPINLOCK) || defined(CONFIG_HWSPINLOCK_MODULE)
31
32int hwspin_lock_register(struct hwspinlock *lock);
33struct hwspinlock *hwspin_lock_unregister(unsigned int id);
34struct hwspinlock *hwspin_lock_request(void);
35struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
36int hwspin_lock_free(struct hwspinlock *hwlock);
37int hwspin_lock_get_id(struct hwspinlock *hwlock);
38int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int,
39 unsigned long *);
40int __hwspin_trylock(struct hwspinlock *, int, unsigned long *);
41void __hwspin_unlock(struct hwspinlock *, int, unsigned long *);
42
43#else /* !CONFIG_HWSPINLOCK */
44
45/*
46 * We don't want these functions to fail if CONFIG_HWSPINLOCK is not
47 * enabled. We prefer to silently succeed in this case, and let the
48 * code path get compiled away. This way, if CONFIG_HWSPINLOCK is not
49 * required on a given setup, users of the API will still build and run.
50 *
51 * The only exception is hwspin_lock_register/hwspin_lock_unregister, with which
52 * we _do_ want users to fail (no point in registering hwspinlock instances if
53 * the framework is not available).
54 *
55 * Note: ERR_PTR(-ENODEV) will still be considered a success for NULL-checking
56 * users. Callers that do care can still check for it with IS_ERR().
57 */
58static inline struct hwspinlock *hwspin_lock_request(void)
59{
60 return ERR_PTR(-ENODEV);
61}
62
63static inline struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
64{
65 return ERR_PTR(-ENODEV);
66}
67
68static inline int hwspin_lock_free(struct hwspinlock *hwlock)
69{
70 return 0;
71}
72
73static inline
74int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
75 int mode, unsigned long *flags)
76{
77 return 0;
78}
79
80static inline
81int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
82{
83 return 0;
84}
85
86static inline
87void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
88{
89
90}
91
92static inline int hwspin_lock_get_id(struct hwspinlock *hwlock)
93{
94 return 0;
95}
96
97static inline int hwspin_lock_register(struct hwspinlock *hwlock)
98{
99 return -ENODEV;
100}
101
102static inline struct hwspinlock *hwspin_lock_unregister(unsigned int id)
103{
104 return NULL;
105}
106
107#endif /* !CONFIG_HWSPINLOCK */
108
109/**
110 * hwspin_trylock_irqsave() - try to lock an hwspinlock, disable interrupts
111 * @hwlock: an hwspinlock which we want to trylock
112 * @flags: a pointer to where the caller's interrupt state will be saved at
113 *
114 * This function attempts to lock the underlying hwspinlock, and will
115 * immediately fail if the hwspinlock is already locked.
116 *
117 * Upon a successful return from this function, preemption and local
118 * interrupts are disabled (previous interrupts state is saved at @flags),
119 * so the caller must not sleep, and is advised to release the hwspinlock
120 * as soon as possible.
121 *
122 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
123 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
124 */
125static inline
126int hwspin_trylock_irqsave(struct hwspinlock *hwlock, unsigned long *flags)
127{
128 return __hwspin_trylock(hwlock, HWLOCK_IRQSTATE, flags);
129}
130
131/**
132 * hwspin_trylock_irq() - try to lock an hwspinlock, disable interrupts
133 * @hwlock: an hwspinlock which we want to trylock
134 *
135 * This function attempts to lock the underlying hwspinlock, and will
136 * immediately fail if the hwspinlock is already locked.
137 *
138 * Upon a successful return from this function, preemption and local
139 * interrupts are disabled, so the caller must not sleep, and is advised
140 * to release the hwspinlock as soon as possible.
141 *
142 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
143 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
144 */
145static inline int hwspin_trylock_irq(struct hwspinlock *hwlock)
146{
147 return __hwspin_trylock(hwlock, HWLOCK_IRQ, NULL);
148}
149
150/**
151 * hwspin_trylock() - attempt to lock a specific hwspinlock
152 * @hwlock: an hwspinlock which we want to trylock
153 *
154 * This function attempts to lock an hwspinlock, and will immediately fail
155 * if the hwspinlock is already taken.
156 *
157 * Upon a successful return from this function, preemption is disabled,
158 * so the caller must not sleep, and is advised to release the hwspinlock
159 * as soon as possible. This is required in order to minimize remote cores
160 * polling on the hardware interconnect.
161 *
162 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
163 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
164 */
165static inline int hwspin_trylock(struct hwspinlock *hwlock)
166{
167 return __hwspin_trylock(hwlock, 0, NULL);
168}
169
170/**
171 * hwspin_lock_timeout_irqsave() - lock hwspinlock, with timeout, disable irqs
172 * @hwlock: the hwspinlock to be locked
173 * @to: timeout value in msecs
174 * @flags: a pointer to where the caller's interrupt state will be saved at
175 *
176 * This function locks the underlying @hwlock. If the @hwlock
177 * is already taken, the function will busy loop waiting for it to
178 * be released, but give up when @timeout msecs have elapsed.
179 *
180 * Upon a successful return from this function, preemption and local interrupts
181 * are disabled (plus previous interrupt state is saved), so the caller must
182 * not sleep, and is advised to release the hwspinlock as soon as possible.
183 *
184 * Returns 0 when the @hwlock was successfully taken, and an appropriate
185 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
186 * busy after @timeout msecs). The function will never sleep.
187 */
188static inline int hwspin_lock_timeout_irqsave(struct hwspinlock *hwlock,
189 unsigned int to, unsigned long *flags)
190{
191 return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQSTATE, flags);
192}
193
194/**
195 * hwspin_lock_timeout_irq() - lock hwspinlock, with timeout, disable irqs
196 * @hwlock: the hwspinlock to be locked
197 * @to: timeout value in msecs
198 *
199 * This function locks the underlying @hwlock. If the @hwlock
200 * is already taken, the function will busy loop waiting for it to
201 * be released, but give up when @timeout msecs have elapsed.
202 *
203 * Upon a successful return from this function, preemption and local interrupts
204 * are disabled so the caller must not sleep, and is advised to release the
205 * hwspinlock as soon as possible.
206 *
207 * Returns 0 when the @hwlock was successfully taken, and an appropriate
208 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
209 * busy after @timeout msecs). The function will never sleep.
210 */
211static inline
212int hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int to)
213{
214 return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQ, NULL);
215}
216
217/**
218 * hwspin_lock_timeout() - lock an hwspinlock with timeout limit
219 * @hwlock: the hwspinlock to be locked
220 * @to: timeout value in msecs
221 *
222 * This function locks the underlying @hwlock. If the @hwlock
223 * is already taken, the function will busy loop waiting for it to
224 * be released, but give up when @timeout msecs have elapsed.
225 *
226 * Upon a successful return from this function, preemption is disabled
227 * so the caller must not sleep, and is advised to release the hwspinlock
228 * as soon as possible.
229 * This is required in order to minimize remote cores polling on the
230 * hardware interconnect.
231 *
232 * Returns 0 when the @hwlock was successfully taken, and an appropriate
233 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
234 * busy after @timeout msecs). The function will never sleep.
235 */
236static inline
237int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to)
238{
239 return __hwspin_lock_timeout(hwlock, to, 0, NULL);
240}
241
242/**
243 * hwspin_unlock_irqrestore() - unlock hwspinlock, restore irq state
244 * @hwlock: a previously-acquired hwspinlock which we want to unlock
245 * @flags: previous caller's interrupt state to restore
246 *
247 * This function will unlock a specific hwspinlock, enable preemption and
248 * restore the previous state of the local interrupts. It should be used
249 * to undo, e.g., hwspin_trylock_irqsave().
250 *
251 * @hwlock must be already locked before calling this function: it is a bug
252 * to call unlock on a @hwlock that is already unlocked.
253 */
254static inline void hwspin_unlock_irqrestore(struct hwspinlock *hwlock,
255 unsigned long *flags)
256{
257 __hwspin_unlock(hwlock, HWLOCK_IRQSTATE, flags);
258}
259
260/**
261 * hwspin_unlock_irq() - unlock hwspinlock, enable interrupts
262 * @hwlock: a previously-acquired hwspinlock which we want to unlock
263 *
264 * This function will unlock a specific hwspinlock, enable preemption and
265 * enable local interrupts. Should be used to undo, e.g., hwspin_trylock_irq().
266 *
267 * @hwlock must be already locked (e.g. by hwspin_trylock_irq()) before
268 * calling this function: it is a bug to call unlock on a @hwlock that is
269 * already unlocked.
270 */
271static inline void hwspin_unlock_irq(struct hwspinlock *hwlock)
272{
273 __hwspin_unlock(hwlock, HWLOCK_IRQ, NULL);
274}
275
276/**
277 * hwspin_unlock() - unlock hwspinlock
278 * @hwlock: a previously-acquired hwspinlock which we want to unlock
279 *
280 * This function will unlock a specific hwspinlock and enable preemption
281 * back.
282 *
283 * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling
284 * this function: it is a bug to call unlock on a @hwlock that is already
285 * unlocked.
286 */
287static inline void hwspin_unlock(struct hwspinlock *hwlock)
288{
289 __hwspin_unlock(hwlock, 0, NULL);
290}
291
292#endif /* __LINUX_HWSPINLOCK_H */
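
A usage note on the !CONFIG_HWSPINLOCK stubs above: since hwspin_lock_request()
then returns ERR_PTR(-ENODEV) rather than NULL, a NULL-checking caller keeps
going, and every later call collapses into a no-op that reports success. The
sketch below (the shared_mem_* names are invented for illustration) shows such
an optional user that builds and runs whether or not the framework is
configured in:

#include <linux/err.h>
#include <linux/hwspinlock.h>

static struct hwspinlock *shared_mem_lock;	/* illustrative only */

static int shared_mem_init(void)
{
	/* with CONFIG_HWSPINLOCK=n this is ERR_PTR(-ENODEV), not NULL */
	shared_mem_lock = hwspin_lock_request();
	if (!shared_mem_lock)
		return -EBUSY;	/* only reachable with the framework built in */

	return 0;
}

static int shared_mem_update(void)
{
	int ret;

	/* becomes a successful no-op when the framework is compiled out */
	ret = hwspin_lock_timeout(shared_mem_lock, 10);
	if (ret)
		return ret;

	/* ... update the data shared with the remote core, do NOT sleep ... */

	hwspin_unlock(shared_mem_lock);

	return 0;
}

Callers that must distinguish "framework absent" from "no free lock" can test
the returned pointer with IS_ERR() instead of a plain NULL check.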