author     David S. Miller <davem@davemloft.net>   2005-09-15 00:47:01 -0400
committer  David S. Miller <davem@davemloft.net>   2005-09-15 00:47:01 -0400
commit     4db2ce0199f04b6e99999f22e28ef9a0ae5f0d2f (patch)
tree       87a00c97e02a77cdfec517398caa3f1d8f6a2f0d /arch/x86_64/lib
parent     4a805e863d6b9466baf7084e1d6fdbe6e0628d8e (diff)
[LIB]: Consolidate _atomic_dec_and_lock()
Several implementations were essentially a common piece of C code using
the cmpxchg() macro. Put the implementation in one spot that everyone
can share, and convert sparc64 over to using this.
Alpha is the lone arch-specific implementation remaining; it codes up a
special fast path for the common case in order to avoid the GP reloading
that a pure C version would require.
Signed-off-by: David S. Miller <davem@davemloft.net>
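
For context, the shared C version this commit moves everyone onto is
essentially the cmpxchg() loop the arches had been duplicating. A minimal
sketch of such a consolidated _atomic_dec_and_lock() follows; it mirrors the
structure of the x86_64 copy removed in the diff below, but the exact use of
the cmpxchg() macro on atomic->counter is an illustration, not necessarily
the verbatim consolidated file.

/*
 * Sketch of a generic _atomic_dec_and_lock() built on the cmpxchg()
 * macro; an illustration of the consolidation, not necessarily the
 * exact file added by this commit.
 */
#include <linux/spinlock.h>
#include <asm/atomic.h>

int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
	int counter;
	int newcount;

	for (;;) {
		counter = atomic_read(atomic);
		newcount = counter - 1;
		if (!newcount)
			break;		/* would hit zero: fall back to the lock */

		/* cmpxchg() returns the value it found; a match means our
		 * decrement went in and no lock was needed. */
		if (cmpxchg(&atomic->counter, counter, newcount) == counter)
			return 0;
	}

	/* Slow path: take the lock, then decrement. Return 1 with the
	 * lock still held only if the count really dropped to zero. */
	spin_lock(lock);
	if (atomic_dec_and_test(atomic))
		return 1;
	spin_unlock(lock);
	return 0;
}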
Diffstat (limited to 'arch/x86_64/lib')
-rw-r--r--  arch/x86_64/lib/Makefile         2
-rw-r--r--  arch/x86_64/lib/dec_and_lock.c  40
2 files changed, 0 insertions, 42 deletions
diff --git a/arch/x86_64/lib/Makefile b/arch/x86_64/lib/Makefile
index 6b26a1c1e9ff..bba5db6cebd6 100644
--- a/arch/x86_64/lib/Makefile
+++ b/arch/x86_64/lib/Makefile
@@ -10,5 +10,3 @@ lib-y := csum-partial.o csum-copy.o csum-wrappers.o delay.o \
 	usercopy.o getuser.o putuser.o \
 	thunk.o clear_page.o copy_page.o bitstr.o bitops.o
 lib-y += memcpy.o memmove.o memset.o copy_user.o
-
-lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
diff --git a/arch/x86_64/lib/dec_and_lock.c b/arch/x86_64/lib/dec_and_lock.c
deleted file mode 100644
index ab43394dc775..000000000000
--- a/arch/x86_64/lib/dec_and_lock.c
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * x86 version of "atomic_dec_and_lock()" using
- * the atomic "cmpxchg" instruction.
- *
- * (For CPU's lacking cmpxchg, we use the slow
- * generic version, and this one never even gets
- * compiled).
- */
-
-#include <linux/spinlock.h>
-#include <asm/atomic.h>
-
-int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
-{
-	int counter;
-	int newcount;
-
-repeat:
-	counter = atomic_read(atomic);
-	newcount = counter-1;
-
-	if (!newcount)
-		goto slow_path;
-
-	asm volatile("lock; cmpxchgl %1,%2"
-		:"=a" (newcount)
-		:"r" (newcount), "m" (atomic->counter), "0" (counter));
-
-	/* If the above failed, "eax" will have changed */
-	if (newcount != counter)
-		goto repeat;
-	return 0;
-
-slow_path:
-	spin_lock(lock);
-	if (atomic_dec_and_test(atomic))
-		return 1;
-	spin_unlock(lock);
-	return 0;
-}
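
For reference, a hypothetical caller sketch showing why the return convention
matters: atomic_dec_and_lock() returns 1 with the lock held only when the
count hits zero, so the final put can unlink and free under the lock while
every other put stays lock-free. All names here (struct my_obj, obj_list_lock,
my_obj_put) are illustrative and not taken from this commit.

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <asm/atomic.h>

/* Hypothetical refcounted object kept on a global list; names are
 * purely illustrative. */
struct my_obj {
	atomic_t refcount;
	struct list_head node;
};

static DEFINE_SPINLOCK(obj_list_lock);

static void my_obj_put(struct my_obj *obj)
{
	/* Returns 1 with obj_list_lock held only when refcount hit zero;
	 * otherwise the decrement happens without touching the lock. */
	if (atomic_dec_and_lock(&obj->refcount, &obj_list_lock)) {
		list_del(&obj->node);
		spin_unlock(&obj_list_lock);
		kfree(obj);
	}
}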