about summary refs log tree commit diff stats
path: root/lib/dec_and_lock.c
diff options
context:
space:
mode:
Diffstat (limited to 'lib/dec_and_lock.c')
-rw-r--r--  lib/dec_and_lock.c  40
1 file changed, 40 insertions, 0 deletions
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
new file mode 100644
index 000000000000..6658d81e1836
--- /dev/null
+++ b/lib/dec_and_lock.c
@@ -0,0 +1,40 @@
1#include <linux/module.h>
2#include <linux/spinlock.h>
3#include <asm/atomic.h>
4
5/*
6 * This is an architecture-neutral, but slow,
7 * implementation of the notion of "decrement
8 * a reference count, and return locked if it
9 * decremented to zero".
10 *
11 * NOTE NOTE NOTE! This is _not_ equivalent to
12 *
13 * if (atomic_dec_and_test(&atomic)) {
14 * spin_lock(&lock);
15 * return 1;
16 * }
17 * return 0;
18 *
19 * because the spin-lock and the decrement must be
20 * "atomic".
21 *
22 * This slow version gets the spinlock unconditionally,
23 * and releases it if it isn't needed. Architectures
24 * are encouraged to come up with better approaches,
25 * this is trivially done efficiently using a load-locked
26 * store-conditional approach, for example.
27 */
28
29#ifndef ATOMIC_DEC_AND_LOCK
30int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
31{
32 spin_lock(lock);
33 if (atomic_dec_and_test(atomic))
34 return 1;
35 spin_unlock(lock);
36 return 0;
37}
38
39EXPORT_SYMBOL(_atomic_dec_and_lock);
40#endif