author     David S. Miller <davem@davemloft.net>  2005-09-15 00:47:01 -0400
committer  David S. Miller <davem@davemloft.net>  2005-09-15 00:47:01 -0400
commit     4db2ce0199f04b6e99999f22e28ef9a0ae5f0d2f
tree       87a00c97e02a77cdfec517398caa3f1d8f6a2f0d
parent     4a805e863d6b9466baf7084e1d6fdbe6e0628d8e
[LIB]: Consolidate _atomic_dec_and_lock()
Several implementations were essentially a common piece of C code using
the cmpxchg() macro.  Put the implementation in one spot that everyone
can share, and convert sparc64 over to using this.

Alpha is the lone arch-specific implementation, which codes up a
special fast path for the common case in order to avoid the GP
reloading that a pure C version would require.

Signed-off-by: David S. Miller <davem@davemloft.net>
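The idiom being consolidated is "decrement a reference count, and return with the lock held if it dropped to zero": the lock is taken only on the slow path, when the decrement might actually reach zero. Below is a minimal userspace sketch of the same pattern, for illustration only; the names (my_obj, obj_put, dec_and_lock) are hypothetical, GCC's __sync builtins stand in for the kernel's cmpxchg() and atomic_dec_and_test(), and a pthread mutex stands in for spinlock_t.

/*
 * Sketch only: a userspace stand-in for the kernel pattern in this
 * patch.  __sync_val_compare_and_swap() plays the role of cmpxchg(),
 * pthread_mutex_t the role of spinlock_t.  All names here are
 * illustrative, not from the patch.  Build with: gcc -pthread
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;

struct my_obj {
	int refcount;			/* stands in for atomic_t */
};

/* Decrement; return 1 with the lock held iff the count reached zero. */
static int dec_and_lock(int *count, pthread_mutex_t *lock)
{
	int old, new;

	for (;;) {
		old = *count;		/* the kernel uses atomic_read() */
		new = old - 1;
		if (!new)
			break;		/* may hit zero: take the slow path */

		/* Fast path: lock-free while the count stays above zero. */
		if (__sync_val_compare_and_swap(count, old, new) == old)
			return 0;
	}

	pthread_mutex_lock(lock);
	if (__sync_sub_and_fetch(count, 1) == 0)
		return 1;		/* caller tears down, then unlocks */
	pthread_mutex_unlock(lock);
	return 0;
}

static void obj_put(struct my_obj *obj)
{
	if (dec_and_lock(&obj->refcount, &obj_lock)) {
		/* Last reference: free while holding the lock, so a
		 * lookup path taking obj_lock never sees a dead object. */
		free(obj);
		pthread_mutex_unlock(&obj_lock);
	}
}

int main(void)
{
	struct my_obj *obj = malloc(sizeof(*obj));

	obj->refcount = 2;
	obj_put(obj);	/* 2 -> 1 on the lock-free fast path */
	obj_put(obj);	/* 1 -> 0: frees under the lock */
	printf("done\n");
	return 0;
}

This is the pattern the generic lib/dec_and_lock.c below now provides whenever the architecture defines __HAVE_ARCH_CMPXCHG, with the slower architecture-neutral version kept as the fallback.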
-rw-r--r--  arch/i386/Kconfig                     5
-rw-r--r--  arch/i386/lib/Makefile                1
-rw-r--r--  arch/i386/lib/dec_and_lock.c         42
-rw-r--r--  arch/ia64/Kconfig                     5
-rw-r--r--  arch/ia64/lib/Makefile                1
-rw-r--r--  arch/ia64/lib/dec_and_lock.c         42
-rw-r--r--  arch/m32r/Kconfig                     5
-rw-r--r--  arch/mips/Kconfig                     4
-rw-r--r--  arch/mips/lib/Makefile                2
-rw-r--r--  arch/mips/lib/dec_and_lock.c         47
-rw-r--r--  arch/ppc/Kconfig                      4
-rw-r--r--  arch/ppc/lib/Makefile                 2
-rw-r--r--  arch/ppc/lib/dec_and_lock.c          38
-rw-r--r--  arch/ppc64/Kconfig                    4
-rw-r--r--  arch/ppc64/lib/Makefile               2
-rw-r--r--  arch/ppc64/lib/dec_and_lock.c        47
-rw-r--r--  arch/sparc64/Kconfig.debug            8
-rw-r--r--  arch/sparc64/kernel/sparc64_ksyms.c   3
-rw-r--r--  arch/sparc64/lib/Makefile             2
-rw-r--r--  arch/sparc64/lib/dec_and_lock.S      80
-rw-r--r--  arch/x86_64/Kconfig                   5
-rw-r--r--  arch/x86_64/kernel/x8664_ksyms.c      4
-rw-r--r--  arch/x86_64/lib/Makefile              2
-rw-r--r--  arch/x86_64/lib/dec_and_lock.c       40
-rw-r--r--  arch/xtensa/Kconfig                   4
-rw-r--r--  lib/dec_and_lock.c                   35
26 files changed, 38 insertions(+), 396 deletions(-)
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index b22f003eaa6d..d2703cda61ea 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -908,11 +908,6 @@ config IRQBALANCE
 	  The default yes will allow the kernel to do irq load balancing.
 	  Saying no will keep the kernel from doing irq load balancing.
 
-config HAVE_DEC_LOCK
-	bool
-	depends on (SMP || PREEMPT) && X86_CMPXCHG
-	default y
-
 # turning this on wastes a bunch of space.
 # Summit needs it only when NUMA is on
 config BOOT_IOREMAP
diff --git a/arch/i386/lib/Makefile b/arch/i386/lib/Makefile
index 7b1932d20f96..914933e9ec3d 100644
--- a/arch/i386/lib/Makefile
+++ b/arch/i386/lib/Makefile
@@ -7,4 +7,3 @@ lib-y = checksum.o delay.o usercopy.o getuser.o putuser.o memcpy.o strstr.o \
 	bitops.o
 
 lib-$(CONFIG_X86_USE_3DNOW)	+= mmx.o
-lib-$(CONFIG_HAVE_DEC_LOCK)	+= dec_and_lock.o
diff --git a/arch/i386/lib/dec_and_lock.c b/arch/i386/lib/dec_and_lock.c
deleted file mode 100644
index 8b81b2524fa6..000000000000
--- a/arch/i386/lib/dec_and_lock.c
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * x86 version of "atomic_dec_and_lock()" using
- * the atomic "cmpxchg" instruction.
- *
- * (For CPU's lacking cmpxchg, we use the slow
- * generic version, and this one never even gets
- * compiled).
- */
-
-#include <linux/spinlock.h>
-#include <linux/module.h>
-#include <asm/atomic.h>
-
-int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
-{
-	int counter;
-	int newcount;
-
-repeat:
-	counter = atomic_read(atomic);
-	newcount = counter-1;
-
-	if (!newcount)
-		goto slow_path;
-
-	asm volatile("lock; cmpxchgl %1,%2"
-		:"=a" (newcount)
-		:"r" (newcount), "m" (atomic->counter), "0" (counter));
-
-	/* If the above failed, "eax" will have changed */
-	if (newcount != counter)
-		goto repeat;
-	return 0;
-
-slow_path:
-	spin_lock(lock);
-	if (atomic_dec_and_test(atomic))
-		return 1;
-	spin_unlock(lock);
-	return 0;
-}
-EXPORT_SYMBOL(_atomic_dec_and_lock);
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index ed25d66c8d50..945c15a0722b 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -298,11 +298,6 @@ config PREEMPT
 
 source "mm/Kconfig"
 
-config HAVE_DEC_LOCK
-	bool
-	depends on (SMP || PREEMPT)
-	default y
-
 config IA32_SUPPORT
 	bool "Support for Linux/x86 binaries"
 	help
diff --git a/arch/ia64/lib/Makefile b/arch/ia64/lib/Makefile
index 799407e7726f..cb1af597370b 100644
--- a/arch/ia64/lib/Makefile
+++ b/arch/ia64/lib/Makefile
@@ -15,7 +15,6 @@ lib-$(CONFIG_ITANIUM) += copy_page.o copy_user.o memcpy.o
 lib-$(CONFIG_MCKINLEY)	+= copy_page_mck.o memcpy_mck.o
 lib-$(CONFIG_PERFMON)	+= carta_random.o
 lib-$(CONFIG_MD_RAID5)	+= xor.o
-lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
 
 AFLAGS___divdi3.o	=
 AFLAGS___udivdi3.o	= -DUNSIGNED
diff --git a/arch/ia64/lib/dec_and_lock.c b/arch/ia64/lib/dec_and_lock.c
deleted file mode 100644
index c7ce92f968f1..000000000000
--- a/arch/ia64/lib/dec_and_lock.c
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (C) 2003 Jerome Marchand, Bull S.A.
- *	Cleaned up by David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * This file is released under the GPLv2, or at your option any later version.
- *
- * ia64 version of "atomic_dec_and_lock()" using the atomic "cmpxchg" instruction.  This
- * code is an adaptation of the x86 version of "atomic_dec_and_lock()".
- */
-
-#include <linux/compiler.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <asm/atomic.h>
-
-/*
- * Decrement REFCOUNT and if the count reaches zero, acquire the spinlock.  Both of these
- * operations have to be done atomically, so that the count doesn't drop to zero without
- * acquiring the spinlock first.
- */
-int
-_atomic_dec_and_lock (atomic_t *refcount, spinlock_t *lock)
-{
-	int old, new;
-
-	do {
-		old = atomic_read(refcount);
-		new = old - 1;
-
-		if (unlikely (old == 1)) {
-			/* oops, we may be decrementing to zero, do it the slow way... */
-			spin_lock(lock);
-			if (atomic_dec_and_test(refcount))
-				return 1;
-			spin_unlock(lock);
-			return 0;
-		}
-	} while (cmpxchg(&refcount->counter, old, new) != old);
-	return 0;
-}
-
-EXPORT_SYMBOL(_atomic_dec_and_lock);
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index 1ef3987ebc6a..4d100f3886e1 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -220,11 +220,6 @@ config PREEMPT
 	  Say Y here if you are building a kernel for a desktop, embedded
 	  or real-time system.  Say N if you are unsure.
 
-config HAVE_DEC_LOCK
-	bool
-	depends on (SMP || PREEMPT)
-	default n
-
 config SMP
 	bool "Symmetric multi-processing support"
 	---help---
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 0eb71ac303af..4cd724c05700 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1009,10 +1009,6 @@ config GENERIC_CALIBRATE_DELAY
 	bool
 	default y
 
-config HAVE_DEC_LOCK
-	bool
-	default y
-
 #
 # Select some configuration options automatically based on user selections.
 #
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index 21b92b9dd013..037303412909 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -2,7 +2,7 @@
 # Makefile for MIPS-specific library files..
 #
 
-lib-y	+= csum_partial_copy.o dec_and_lock.o memcpy.o promlib.o \
+lib-y	+= csum_partial_copy.o memcpy.o promlib.o \
 	   strlen_user.o strncpy_user.o strnlen_user.o
 
 obj-y	+= iomap.o
diff --git a/arch/mips/lib/dec_and_lock.c b/arch/mips/lib/dec_and_lock.c
deleted file mode 100644
index fd82c84a93b7..000000000000
--- a/arch/mips/lib/dec_and_lock.c
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * MIPS version of atomic_dec_and_lock() using cmpxchg
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <asm/atomic.h>
-#include <asm/system.h>
-
-/*
- * This is an implementation of the notion of "decrement a
- * reference count, and return locked if it decremented to zero".
- *
- * This implementation can be used on any architecture that
- * has a cmpxchg, and where atomic->value is an int holding
- * the value of the atomic (i.e. the high bits aren't used
- * for a lock or anything like that).
- */
-int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
-{
-	int counter;
-	int newcount;
-
-	for (;;) {
-		counter = atomic_read(atomic);
-		newcount = counter - 1;
-		if (!newcount)
-			break;		/* do it the slow way */
-
-		newcount = cmpxchg(&atomic->counter, counter, newcount);
-		if (newcount == counter)
-			return 0;
-	}
-
-	spin_lock(lock);
-	if (atomic_dec_and_test(atomic))
-		return 1;
-	spin_unlock(lock);
-	return 0;
-}
-
-EXPORT_SYMBOL(_atomic_dec_and_lock);
diff --git a/arch/ppc/Kconfig b/arch/ppc/Kconfig
index 347ea284140b..776941c75672 100644
--- a/arch/ppc/Kconfig
+++ b/arch/ppc/Kconfig
@@ -26,10 +26,6 @@ config GENERIC_CALIBRATE_DELAY
 	bool
 	default y
 
-config HAVE_DEC_LOCK
-	bool
-	default y
-
 config PPC
 	bool
 	default y
diff --git a/arch/ppc/lib/Makefile b/arch/ppc/lib/Makefile
index f1e1fb4144f0..50358e4ea159 100644
--- a/arch/ppc/lib/Makefile
+++ b/arch/ppc/lib/Makefile
@@ -2,7 +2,7 @@
 # Makefile for ppc-specific library files..
 #
 
-obj-y	:= checksum.o string.o strcase.o dec_and_lock.o div64.o
+obj-y	:= checksum.o string.o strcase.o div64.o
 
 obj-$(CONFIG_8xx)	+= rheap.o
 obj-$(CONFIG_CPM2)	+= rheap.o
diff --git a/arch/ppc/lib/dec_and_lock.c b/arch/ppc/lib/dec_and_lock.c
deleted file mode 100644
index b18f0d9a00fc..000000000000
--- a/arch/ppc/lib/dec_and_lock.c
+++ /dev/null
@@ -1,38 +0,0 @@
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <asm/atomic.h>
-#include <asm/system.h>
-
-/*
- * This is an implementation of the notion of "decrement a
- * reference count, and return locked if it decremented to zero".
- *
- * This implementation can be used on any architecture that
- * has a cmpxchg, and where atomic->value is an int holding
- * the value of the atomic (i.e. the high bits aren't used
- * for a lock or anything like that).
- */
-int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
-{
-	int counter;
-	int newcount;
-
-	for (;;) {
-		counter = atomic_read(atomic);
-		newcount = counter - 1;
-		if (!newcount)
-			break;		/* do it the slow way */
-
-		newcount = cmpxchg(&atomic->counter, counter, newcount);
-		if (newcount == counter)
-			return 0;
-	}
-
-	spin_lock(lock);
-	if (atomic_dec_and_test(atomic))
-		return 1;
-	spin_unlock(lock);
-	return 0;
-}
-
-EXPORT_SYMBOL(_atomic_dec_and_lock);
diff --git a/arch/ppc64/Kconfig b/arch/ppc64/Kconfig
index deca68ad644a..c658650af429 100644
--- a/arch/ppc64/Kconfig
+++ b/arch/ppc64/Kconfig
@@ -28,10 +28,6 @@ config GENERIC_ISA_DMA
 	bool
 	default y
 
-config HAVE_DEC_LOCK
-	bool
-	default y
-
 config EARLY_PRINTK
 	bool
 	default y
diff --git a/arch/ppc64/lib/Makefile b/arch/ppc64/lib/Makefile
index 76fbfa9f706f..0b6e967de948 100644
--- a/arch/ppc64/lib/Makefile
+++ b/arch/ppc64/lib/Makefile
@@ -2,7 +2,7 @@
 # Makefile for ppc64-specific library files..
 #
 
-lib-y := checksum.o dec_and_lock.o string.o strcase.o
+lib-y := checksum.o string.o strcase.o
 lib-y += copypage.o memcpy.o copyuser.o usercopy.o
 
 # Lock primitives are defined as no-ops in include/linux/spinlock.h
diff --git a/arch/ppc64/lib/dec_and_lock.c b/arch/ppc64/lib/dec_and_lock.c
deleted file mode 100644
index 7b9d4da5cf92..000000000000
--- a/arch/ppc64/lib/dec_and_lock.c
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * ppc64 version of atomic_dec_and_lock() using cmpxchg
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <asm/atomic.h>
-#include <asm/system.h>
-
-/*
- * This is an implementation of the notion of "decrement a
- * reference count, and return locked if it decremented to zero".
- *
- * This implementation can be used on any architecture that
- * has a cmpxchg, and where atomic->value is an int holding
- * the value of the atomic (i.e. the high bits aren't used
- * for a lock or anything like that).
- */
-int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
-{
-	int counter;
-	int newcount;
-
-	for (;;) {
-		counter = atomic_read(atomic);
-		newcount = counter - 1;
-		if (!newcount)
-			break;		/* do it the slow way */
-
-		newcount = cmpxchg(&atomic->counter, counter, newcount);
-		if (newcount == counter)
-			return 0;
-	}
-
-	spin_lock(lock);
-	if (atomic_dec_and_test(atomic))
-		return 1;
-	spin_unlock(lock);
-	return 0;
-}
-
-EXPORT_SYMBOL(_atomic_dec_and_lock);
diff --git a/arch/sparc64/Kconfig.debug b/arch/sparc64/Kconfig.debug
index cd8d39fb954d..af0e9411b83e 100644
--- a/arch/sparc64/Kconfig.debug
+++ b/arch/sparc64/Kconfig.debug
@@ -33,14 +33,6 @@ config DEBUG_BOOTMEM
 	depends on DEBUG_KERNEL
 	bool "Debug BOOTMEM initialization"
 
-# We have a custom atomic_dec_and_lock() implementation but it's not
-# compatible with spinlock debugging so we need to fall back on
-# the generic version in that case.
-config HAVE_DEC_LOCK
-	bool
-	depends on SMP && !DEBUG_SPINLOCK
-	default y
-
 config MCOUNT
 	bool
 	depends on STACK_DEBUG
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index cbb5e59824e5..fb7a5370dbfc 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -163,9 +163,6 @@ EXPORT_SYMBOL(atomic64_add);
 EXPORT_SYMBOL(atomic64_add_ret);
 EXPORT_SYMBOL(atomic64_sub);
 EXPORT_SYMBOL(atomic64_sub_ret);
-#ifdef CONFIG_SMP
-EXPORT_SYMBOL(_atomic_dec_and_lock);
-#endif
 
 /* Atomic bit operations. */
 EXPORT_SYMBOL(test_and_set_bit);
diff --git a/arch/sparc64/lib/Makefile b/arch/sparc64/lib/Makefile
index d968aebe83b2..c295806500f7 100644
--- a/arch/sparc64/lib/Makefile
+++ b/arch/sparc64/lib/Makefile
@@ -14,6 +14,4 @@ lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \
 	 copy_in_user.o user_fixup.o memmove.o \
 	 mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o
 
-lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
-
 obj-y += iomap.o
diff --git a/arch/sparc64/lib/dec_and_lock.S b/arch/sparc64/lib/dec_and_lock.S
deleted file mode 100644
index 8ee288dd0afc..000000000000
--- a/arch/sparc64/lib/dec_and_lock.S
+++ /dev/null
@@ -1,80 +0,0 @@
-/* $Id: dec_and_lock.S,v 1.5 2001/11/18 00:12:56 davem Exp $
- * dec_and_lock.S: Sparc64 version of "atomic_dec_and_lock()"
- *                 using cas and ldstub instructions.
- *
- * Copyright (C) 2000 David S. Miller (davem@redhat.com)
- */
-#include <linux/config.h>
-#include <asm/thread_info.h>
-
-	.text
-	.align	64
-
-	/* CAS basically works like this:
-	 *
-	 * void CAS(MEM, REG1, REG2)
-	 * {
-	 *   START_ATOMIC();
-	 *   if (*(MEM) == REG1) {
-	 *     TMP = *(MEM);
-	 *     *(MEM) = REG2;
-	 *     REG2 = TMP;
-	 *   } else
-	 *     REG2 = *(MEM);
-	 *   END_ATOMIC();
-	 * }
-	 */
-
-	.globl	_atomic_dec_and_lock
-_atomic_dec_and_lock:	/* %o0 = counter, %o1 = lock */
-loop1:	lduw	[%o0], %g2
-	subcc	%g2, 1, %g7
-	be,pn	%icc, start_to_zero
-	 nop
-nzero:	cas	[%o0], %g2, %g7
-	cmp	%g2, %g7
-	bne,pn	%icc, loop1
-	 mov	0, %g1
-
-out:
-	membar	#StoreLoad | #StoreStore
-	retl
-	 mov	%g1, %o0
-start_to_zero:
-#ifdef CONFIG_PREEMPT
-	ldsw	[%g6 + TI_PRE_COUNT], %g3
-	add	%g3, 1, %g3
-	stw	%g3, [%g6 + TI_PRE_COUNT]
-#endif
-to_zero:
-	ldstub	[%o1], %g3
-	membar	#StoreLoad | #StoreStore
-	brnz,pn	%g3, spin_on_lock
-	 nop
-loop2:	cas	[%o0], %g2, %g7	/* ASSERT(g7 == 0) */
-	cmp	%g2, %g7
-
-	be,pt	%icc, out
-	 mov	1, %g1
-	lduw	[%o0], %g2
-	subcc	%g2, 1, %g7
-	be,pn	%icc, loop2
-	 nop
-	membar	#StoreStore | #LoadStore
-	stb	%g0, [%o1]
-#ifdef CONFIG_PREEMPT
-	ldsw	[%g6 + TI_PRE_COUNT], %g3
-	sub	%g3, 1, %g3
-	stw	%g3, [%g6 + TI_PRE_COUNT]
-#endif
-
-	b,pt	%xcc, nzero
-	 nop
-spin_on_lock:
-	ldub	[%o1], %g3
-	membar	#LoadLoad
-	brnz,pt	%g3, spin_on_lock
-	 nop
-	ba,pt	%xcc, to_zero
-	 nop
-	nop
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index e63323e03ea9..0969d570f3b5 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -277,11 +277,6 @@ source "mm/Kconfig"
 config HAVE_ARCH_EARLY_PFN_TO_NID
 	def_bool y
 
-config HAVE_DEC_LOCK
-	bool
-	depends on SMP
-	default y
-
 config NR_CPUS
 	int "Maximum number of CPUs (2-256)"
 	range 2 256
diff --git a/arch/x86_64/kernel/x8664_ksyms.c b/arch/x86_64/kernel/x8664_ksyms.c
index 68ec03070e5a..fd99ddd009bc 100644
--- a/arch/x86_64/kernel/x8664_ksyms.c
+++ b/arch/x86_64/kernel/x8664_ksyms.c
@@ -178,10 +178,6 @@ EXPORT_SYMBOL(rwsem_down_write_failed_thunk);
 
 EXPORT_SYMBOL(empty_zero_page);
 
-#ifdef CONFIG_HAVE_DEC_LOCK
-EXPORT_SYMBOL(_atomic_dec_and_lock);
-#endif
-
 EXPORT_SYMBOL(die_chain);
 EXPORT_SYMBOL(register_die_notifier);
 
diff --git a/arch/x86_64/lib/Makefile b/arch/x86_64/lib/Makefile
index 6b26a1c1e9ff..bba5db6cebd6 100644
--- a/arch/x86_64/lib/Makefile
+++ b/arch/x86_64/lib/Makefile
@@ -10,5 +10,3 @@ lib-y := csum-partial.o csum-copy.o csum-wrappers.o delay.o \
 	usercopy.o getuser.o putuser.o \
 	thunk.o clear_page.o copy_page.o bitstr.o bitops.o
 lib-y += memcpy.o memmove.o memset.o copy_user.o
-
-lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
diff --git a/arch/x86_64/lib/dec_and_lock.c b/arch/x86_64/lib/dec_and_lock.c
deleted file mode 100644
index ab43394dc775..000000000000
--- a/arch/x86_64/lib/dec_and_lock.c
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * x86 version of "atomic_dec_and_lock()" using
- * the atomic "cmpxchg" instruction.
- *
- * (For CPU's lacking cmpxchg, we use the slow
- * generic version, and this one never even gets
- * compiled).
- */
-
-#include <linux/spinlock.h>
-#include <asm/atomic.h>
-
-int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
-{
-	int counter;
-	int newcount;
-
-repeat:
-	counter = atomic_read(atomic);
-	newcount = counter-1;
-
-	if (!newcount)
-		goto slow_path;
-
-	asm volatile("lock; cmpxchgl %1,%2"
-		:"=a" (newcount)
-		:"r" (newcount), "m" (atomic->counter), "0" (counter));
-
-	/* If the above failed, "eax" will have changed */
-	if (newcount != counter)
-		goto repeat;
-	return 0;
-
-slow_path:
-	spin_lock(lock);
-	if (atomic_dec_and_test(atomic))
-		return 1;
-	spin_unlock(lock);
-	return 0;
-}
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 2b6257bec4c3..7e841aa2a4aa 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -26,10 +26,6 @@ config RWSEM_XCHGADD_ALGORITHM
 	bool
 	default y
 
-config HAVE_DEC_LOCK
-	bool
-	default y
-
 config GENERIC_HARDIRQS
 	bool
 	default y
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
index 2377af057d09..305a9663aee3 100644
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -1,7 +1,41 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <asm/atomic.h>
+#include <asm/system.h>
 
+#ifdef __HAVE_ARCH_CMPXCHG
+/*
+ * This is an implementation of the notion of "decrement a
+ * reference count, and return locked if it decremented to zero".
+ *
+ * This implementation can be used on any architecture that
+ * has a cmpxchg, and where atomic->value is an int holding
+ * the value of the atomic (i.e. the high bits aren't used
+ * for a lock or anything like that).
+ */
+int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
+{
+	int counter;
+	int newcount;
+
+	for (;;) {
+		counter = atomic_read(atomic);
+		newcount = counter - 1;
+		if (!newcount)
+			break;		/* do it the slow way */
+
+		newcount = cmpxchg(&atomic->counter, counter, newcount);
+		if (newcount == counter)
+			return 0;
+	}
+
+	spin_lock(lock);
+	if (atomic_dec_and_test(atomic))
+		return 1;
+	spin_unlock(lock);
+	return 0;
+}
+#else
 /*
  * This is an architecture-neutral, but slow,
  * implementation of the notion of "decrement
@@ -33,5 +67,6 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 	spin_unlock(lock);
 	return 0;
 }
+#endif
 
 EXPORT_SYMBOL(_atomic_dec_and_lock);