Diffstat (limited to 'arch/ppc/lib/dec_and_lock.c')
 arch/ppc/lib/dec_and_lock.c | 46 ++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 46 insertions(+), 0 deletions(-)
diff --git a/arch/ppc/lib/dec_and_lock.c b/arch/ppc/lib/dec_and_lock.c
new file mode 100644
index 000000000000..4ee888070d91
--- /dev/null
+++ b/arch/ppc/lib/dec_and_lock.c
@@ -0,0 +1,46 @@
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <asm/atomic.h>
+#include <asm/system.h>
+
+/*
+ * This is an implementation of the notion of "decrement a
+ * reference count, and return locked if it decremented to zero".
+ *
+ * This implementation can be used on any architecture that
+ * has a cmpxchg, and where atomic->value is an int holding
+ * the value of the atomic (i.e. the high bits aren't used
+ * for a lock or anything like that).
+ *
+ * N.B. ATOMIC_DEC_AND_LOCK gets defined in include/linux/spinlock.h
+ * if spinlocks are empty and thus atomic_dec_and_lock is defined
+ * to be atomic_dec_and_test - in that case we don't need it
+ * defined here as well.
+ */
+
+#ifndef ATOMIC_DEC_AND_LOCK
+int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
+{
+	int counter;
+	int newcount;
+
+	for (;;) {
+		counter = atomic_read(atomic);
+		newcount = counter - 1;
+		if (!newcount)
+			break;		/* do it the slow way */
+
+		newcount = cmpxchg(&atomic->counter, counter, newcount);
+		if (newcount == counter)
+			return 0;
+	}
+
+	spin_lock(lock);
+	if (atomic_dec_and_test(atomic))
+		return 1;
+	spin_unlock(lock);
+	return 0;
+}
+
+EXPORT_SYMBOL(_atomic_dec_and_lock);
+#endif /* ATOMIC_DEC_AND_LOCK */
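
The usual caller of atomic_dec_and_lock() is a "put" routine that needs to take a
list or table lock only when the last reference is dropped, so the freeing path
cannot race with a concurrent lookup. The sketch below is illustrative only and is
not part of this commit; struct foo, foo_table_lock and foo_put() are hypothetical
names, and the SPIN_LOCK_UNLOCKED initializer reflects the kernel era of this file.

	/* Hypothetical caller - a sketch, not part of this diff. */
	#include <linux/spinlock.h>
	#include <linux/list.h>
	#include <linux/slab.h>
	#include <asm/atomic.h>

	struct foo {
		atomic_t refcount;
		struct list_head node;
	};

	static spinlock_t foo_table_lock = SPIN_LOCK_UNLOCKED;

	static void foo_put(struct foo *f)
	{
		/*
		 * Fast path: if this is not the last reference, the count
		 * is decremented via cmpxchg and the lock is never taken.
		 * Slow path: on the final drop, atomic_dec_and_lock()
		 * returns nonzero with foo_table_lock held, so the object
		 * can be unlinked and freed without racing a lookup.
		 */
		if (atomic_dec_and_lock(&f->refcount, &foo_table_lock)) {
			list_del(&f->node);
			spin_unlock(&foo_table_lock);
			kfree(f);
		}
	}

The point of the cmpxchg fast path in the file above is exactly this pattern: callers
that merely drop a non-final reference never touch the lock, and only the final
decrement pays for the spin_lock()/atomic_dec_and_test() slow path.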