-rw-r--r--   lib/dec_and_lock.c   49
1 file changed, 6 insertions, 43 deletions
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
index 305a9663aee3..a65c31455541 100644
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -1,47 +1,11 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <asm/atomic.h>
-#include <asm/system.h>
 
-#ifdef __HAVE_ARCH_CMPXCHG
 /*
  * This is an implementation of the notion of "decrement a
  * reference count, and return locked if it decremented to zero".
  *
- * This implementation can be used on any architecture that
- * has a cmpxchg, and where atomic->value is an int holding
- * the value of the atomic (i.e. the high bits aren't used
- * for a lock or anything like that).
- */
-int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
-{
-	int counter;
-	int newcount;
-
-	for (;;) {
-		counter = atomic_read(atomic);
-		newcount = counter - 1;
-		if (!newcount)
-			break;		/* do it the slow way */
-
-		newcount = cmpxchg(&atomic->counter, counter, newcount);
-		if (newcount == counter)
-			return 0;
-	}
-
-	spin_lock(lock);
-	if (atomic_dec_and_test(atomic))
-		return 1;
-	spin_unlock(lock);
-	return 0;
-}
-#else
-/*
- * This is an architecture-neutral, but slow,
- * implementation of the notion of "decrement
- * a reference count, and return locked if it
- * decremented to zero".
- *
  * NOTE NOTE NOTE! This is _not_ equivalent to
  *
  *	if (atomic_dec_and_test(&atomic)) {
@@ -52,21 +16,20 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
  *
  * because the spin-lock and the decrement must be
  * "atomic".
- *
- * This slow version gets the spinlock unconditionally,
- * and releases it if it isn't needed. Architectures
- * are encouraged to come up with better approaches,
- * this is trivially done efficiently using a load-locked
- * store-conditional approach, for example.
  */
 int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 {
+#ifdef CONFIG_SMP
+	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+	if (atomic_add_unless(atomic, -1, 1))
+		return 0;
+#endif
+	/* Otherwise do it the slow way */
 	spin_lock(lock);
 	if (atomic_dec_and_test(atomic))
 		return 1;
 	spin_unlock(lock);
 	return 0;
 }
-#endif
 
 EXPORT_SYMBOL(_atomic_dec_and_lock);
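
The fast path added above relies on the contract of atomic_add_unless(v, a, u): add "a" to *v unless *v equals "u", returning non-zero if the add was performed. As a rough sketch only (not the kernel's per-architecture implementation; the name sketch_add_unless is invented here to avoid clashing with the real primitive, and it assumes atomic_cmpxchg() is available), a generic cmpxchg-based version looks roughly like this:

#include <linux/compiler.h>
#include <asm/atomic.h>

/*
 * Sketch of the atomic_add_unless() contract used by the new fast path:
 * add "a" to "*v" unless "*v" equals "u"; return non-zero if the add
 * was performed.
 */
static inline int sketch_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u) {
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;	/* lost the race, retry with the fresh value */
	}
	return c != u;
}

With a = -1 and u = 1 this decrements the count only when the result cannot be zero, which is exactly what lets the CONFIG_SMP fast path skip the spinlock.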
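
The NOTE kept in the comment is the whole point of the helper: the final decrement and the lock acquisition must behave as one atomic step, the classic case being a refcounted object whose zero-transition unlinks it from a locked list. A minimal hypothetical caller (struct my_obj, obj_list_lock and obj_put() are invented for illustration) might look like:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>

/* Hypothetical object: refcounted, linked on a list guarded by obj_list_lock. */
struct my_obj {
	atomic_t refcount;
	struct list_head node;
};

static DEFINE_SPINLOCK(obj_list_lock);

static void obj_put(struct my_obj *obj)
{
	/*
	 * atomic_dec_and_lock() returns 1 with obj_list_lock held only when
	 * this was the last reference, so nobody can look the object up on
	 * the list between the decrement and the unlink.
	 */
	if (atomic_dec_and_lock(&obj->refcount, &obj_list_lock)) {
		list_del(&obj->node);
		spin_unlock(&obj_list_lock);
		kfree(obj);
	}
}

With the naive dec_and_test()-then-spin_lock() sequence shown in the comment, another CPU could find the object on the list and take a reference in the window between the decrement and the lock, which is the race the helper exists to close.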