author     Linus Torvalds <torvalds@linux-foundation.org>   2014-10-13 09:48:00 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-10-13 09:48:00 -0400
commit     dbb885fecc1b1b35e93416bedd24d21bd20f60ed (patch)
tree       9aa92bcc4e3d3594eba0ba85d72b878d85f35a59 /arch/sparc
parent     d6dd50e07c5bec00db2005969b1a01f8ca3d25ef (diff)
parent     2291059c852706c6f5ffb400366042b7625066cd (diff)
Merge branch 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull arch atomic cleanups from Ingo Molnar:
"This is a series kept separate from the main locking tree, which
cleans up and improves various details in the atomics type handling:
- Remove the unused atomic_or_long() method
- Consolidate and compress atomic ops implementations between
architectures, to reduce linecount and to make it easier to add new
ops.
- Rewrite generic atomic support to only require cmpxchg() from an
architecture - generate all other methods from that"
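
Note: the last bullet refers to the rewritten generic (asm-generic) atomic support, in which an architecture need only supply cmpxchg() and the remaining operations are generated from it. As a rough illustration of that pattern (a sketch, not the actual asm-generic code; the helper name below is made up), an add-and-return built purely on a compare-and-swap loop looks like this:

/*
 * Illustrative sketch only -- not the kernel's asm-generic implementation.
 * Shows how an add-and-return can be generated from cmpxchg() alone.
 */
static inline int sketch_atomic_add_return(int i, atomic_t *v)
{
	int old, new;

	do {
		old = atomic_read(v);	/* snapshot the current value  */
		new = old + i;		/* value we want to install    */
	} while (atomic_cmpxchg(v, old, new) != old);	/* raced: retry */

	return new;
}

Each generated helper simply retries until its cmpxchg() succeeds, which is the general shape of the fallback the generic code can now provide.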
* 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (23 commits)
locking,arch: Use ACCESS_ONCE() instead of cast to volatile in atomic_read()
locking, mips: Fix atomics
locking, sparc64: Fix atomics
locking,arch: Rewrite generic atomic support
locking,arch,xtensa: Fold atomic_ops
locking,arch,sparc: Fold atomic_ops
locking,arch,sh: Fold atomic_ops
locking,arch,powerpc: Fold atomic_ops
locking,arch,parisc: Fold atomic_ops
locking,arch,mn10300: Fold atomic_ops
locking,arch,mips: Fold atomic_ops
locking,arch,metag: Fold atomic_ops
locking,arch,m68k: Fold atomic_ops
locking,arch,m32r: Fold atomic_ops
locking,arch,ia64: Fold atomic_ops
locking,arch,hexagon: Fold atomic_ops
locking,arch,cris: Fold atomic_ops
locking,arch,avr32: Fold atomic_ops
locking,arch,arm64: Fold atomic_ops
locking,arch,arm: Fold atomic_ops
...
Diffstat (limited to 'arch/sparc')
-rw-r--r--  arch/sparc/include/asm/atomic_32.h |  19
-rw-r--r--  arch/sparc/include/asm/atomic_64.h |  49
-rw-r--r--  arch/sparc/kernel/smp_64.c         |   2
-rw-r--r--  arch/sparc/lib/atomic32.c          |  29
-rw-r--r--  arch/sparc/lib/atomic_64.S         | 163
-rw-r--r--  arch/sparc/lib/ksyms.c             |  25
6 files changed, 136 insertions, 151 deletions
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index 7aed2be45b44..765c1776ec9f 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -20,23 +20,22 @@
 
 #define ATOMIC_INIT(i)  { (i) }
 
-int __atomic_add_return(int, atomic_t *);
+int atomic_add_return(int, atomic_t *);
 int atomic_cmpxchg(atomic_t *, int, int);
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 int __atomic_add_unless(atomic_t *, int, int);
 void atomic_set(atomic_t *, int);
 
-#define atomic_read(v)          (*(volatile int *)&(v)->counter)
+#define atomic_read(v)          ACCESS_ONCE((v)->counter)
 
-#define atomic_add(i, v)        ((void)__atomic_add_return( (int)(i), (v)))
-#define atomic_sub(i, v)        ((void)__atomic_add_return(-(int)(i), (v)))
-#define atomic_inc(v)           ((void)__atomic_add_return(        1, (v)))
-#define atomic_dec(v)           ((void)__atomic_add_return(       -1, (v)))
+#define atomic_add(i, v)        ((void)atomic_add_return( (int)(i), (v)))
+#define atomic_sub(i, v)        ((void)atomic_add_return(-(int)(i), (v)))
+#define atomic_inc(v)           ((void)atomic_add_return(        1, (v)))
+#define atomic_dec(v)           ((void)atomic_add_return(       -1, (v)))
 
-#define atomic_add_return(i, v) (__atomic_add_return( (int)(i), (v)))
-#define atomic_sub_return(i, v) (__atomic_add_return(-(int)(i), (v)))
-#define atomic_inc_return(v)    (__atomic_add_return(        1, (v)))
-#define atomic_dec_return(v)    (__atomic_add_return(       -1, (v)))
+#define atomic_sub_return(i, v) (atomic_add_return(-(int)(i), (v)))
+#define atomic_inc_return(v)    (atomic_add_return(        1, (v)))
+#define atomic_dec_return(v)    (atomic_add_return(       -1, (v)))
 
 #define atomic_add_negative(a, v)       (atomic_add_return((a), (v)) < 0)
 
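Note: for context only, ACCESS_ONCE() in the <linux/compiler.h> of that era is the same volatile access spelled once, so the atomic_read() change above is a cleanup rather than a behavioural change:

/* Contemporary definition, shown here for reference (not part of this diff): */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))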
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index bb894c8bec56..4082749913ce 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -14,33 +14,34 @@
 #define ATOMIC_INIT(i)          { (i) }
 #define ATOMIC64_INIT(i)        { (i) }
 
-#define atomic_read(v)          (*(volatile int *)&(v)->counter)
-#define atomic64_read(v)        (*(volatile long *)&(v)->counter)
+#define atomic_read(v)          ACCESS_ONCE((v)->counter)
+#define atomic64_read(v)        ACCESS_ONCE((v)->counter)
 
 #define atomic_set(v, i)        (((v)->counter) = i)
 #define atomic64_set(v, i)      (((v)->counter) = i)
 
-void atomic_add(int, atomic_t *);
-void atomic64_add(long, atomic64_t *);
-void atomic_sub(int, atomic_t *);
-void atomic64_sub(long, atomic64_t *);
+#define ATOMIC_OP(op)                                           \
+void atomic_##op(int, atomic_t *);                              \
+void atomic64_##op(long, atomic64_t *);
 
-int atomic_add_ret(int, atomic_t *);
-long atomic64_add_ret(long, atomic64_t *);
-int atomic_sub_ret(int, atomic_t *);
-long atomic64_sub_ret(long, atomic64_t *);
+#define ATOMIC_OP_RETURN(op)                                    \
+int atomic_##op##_return(int, atomic_t *);                      \
+long atomic64_##op##_return(long, atomic64_t *);
 
-#define atomic_dec_return(v) atomic_sub_ret(1, v)
-#define atomic64_dec_return(v) atomic64_sub_ret(1, v)
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
 
-#define atomic_inc_return(v) atomic_add_ret(1, v)
-#define atomic64_inc_return(v) atomic64_add_ret(1, v)
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
 
-#define atomic_sub_return(i, v) atomic_sub_ret(i, v)
-#define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
-#define atomic_add_return(i, v) atomic_add_ret(i, v)
-#define atomic64_add_return(i, v) atomic64_add_ret(i, v)
+#define atomic_dec_return(v)   atomic_sub_return(1, v)
+#define atomic64_dec_return(v) atomic64_sub_return(1, v)
+
+#define atomic_inc_return(v)   atomic_add_return(1, v)
+#define atomic64_inc_return(v) atomic64_add_return(1, v)
 
 /*
  * atomic_inc_and_test - increment and test
@@ -53,11 +54,11 @@ long atomic64_sub_ret(long, atomic64_t *);
 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
 
-#define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
-#define atomic64_sub_and_test(i, v) (atomic64_sub_ret(i, v) == 0)
+#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+#define atomic64_sub_and_test(i, v) (atomic64_sub_return(i, v) == 0)
 
-#define atomic_dec_and_test(v) (atomic_sub_ret(1, v) == 0)
-#define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
+#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
+#define atomic64_dec_and_test(v) (atomic64_sub_return(1, v) == 0)
 
 #define atomic_inc(v) atomic_add(1, v)
 #define atomic64_inc(v) atomic64_add(1, v)
@@ -65,8 +66,8 @@ long atomic64_sub_ret(long, atomic64_t *);
 #define atomic_dec(v) atomic_sub(1, v)
 #define atomic64_dec(v) atomic64_sub(1, v)
 
-#define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
-#define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
+#define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
+#define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
 
 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index c9300bfaee5a..302c476413d5 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1138,7 +1138,7 @@ static unsigned long penguins_are_doing_time;
 
 void smp_capture(void)
 {
-	int result = atomic_add_ret(1, &smp_capture_depth);
+	int result = atomic_add_return(1, &smp_capture_depth);
 
 	if (result == 1) {
 		int ncpus = num_online_cpus();
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index 1d32b54089aa..a7c418ac26af 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -27,18 +27,23 @@ static DEFINE_SPINLOCK(dummy);
 
 #endif /* SMP */
 
-int __atomic_add_return(int i, atomic_t *v)
-{
-	int ret;
-	unsigned long flags;
-	spin_lock_irqsave(ATOMIC_HASH(v), flags);
-
-	ret = (v->counter += i);
-
-	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
-	return ret;
-}
-EXPORT_SYMBOL(__atomic_add_return);
+#define ATOMIC_OP(op, cop)                                      \
+int atomic_##op##_return(int i, atomic_t *v)                    \
+{                                                               \
+	int ret;                                                \
+	unsigned long flags;                                    \
+	spin_lock_irqsave(ATOMIC_HASH(v), flags);               \
+                                                                \
+	ret = (v->counter cop i);                               \
+                                                                \
+	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);          \
+	return ret;                                             \
+}                                                               \
+EXPORT_SYMBOL(atomic_##op##_return);
+
+ATOMIC_OP(add, +=)
+
+#undef ATOMIC_OP
 
 int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
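Note: for readability, this is roughly what ATOMIC_OP(add, +=) above expands to after preprocessing -- the same spinlock-protected routine the file previously spelled out by hand, now named atomic_add_return():

int atomic_add_return(int i, atomic_t *v)
{
	int ret;
	unsigned long flags;
	spin_lock_irqsave(ATOMIC_HASH(v), flags);

	ret = (v->counter += i);

	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
	return ret;
}
EXPORT_SYMBOL(atomic_add_return);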
diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
index 85c233d0a340..05dac43907d1 100644
--- a/arch/sparc/lib/atomic_64.S
+++ b/arch/sparc/lib/atomic_64.S
@@ -14,109 +14,80 @@
  * memory barriers, and a second which returns
  * a value and does the barriers.
  */
-ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
-	BACKOFF_SETUP(%o2)
-1:	lduw	[%o1], %g1
-	add	%g1, %o0, %g7
-	cas	[%o1], %g1, %g7
-	cmp	%g1, %g7
-	bne,pn	%icc, BACKOFF_LABEL(2f, 1b)
-	 nop
-	retl
-	 nop
-2:	BACKOFF_SPIN(%o2, %o3, 1b)
-ENDPROC(atomic_add)
 
-ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
-	BACKOFF_SETUP(%o2)
-1:	lduw	[%o1], %g1
-	sub	%g1, %o0, %g7
-	cas	[%o1], %g1, %g7
-	cmp	%g1, %g7
-	bne,pn	%icc, BACKOFF_LABEL(2f, 1b)
-	 nop
-	retl
-	 nop
-2:	BACKOFF_SPIN(%o2, %o3, 1b)
-ENDPROC(atomic_sub)
+#define ATOMIC_OP(op)                                           \
+ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */      \
+	BACKOFF_SETUP(%o2);                                     \
+1:	lduw	[%o1], %g1;                                     \
+	op	%g1, %o0, %g7;                                  \
+	cas	[%o1], %g1, %g7;                                \
+	cmp	%g1, %g7;                                       \
+	bne,pn	%icc, BACKOFF_LABEL(2f, 1b);                    \
+	 nop;                                                   \
+	retl;                                                   \
+	 nop;                                                   \
+2:	BACKOFF_SPIN(%o2, %o3, 1b);                             \
+ENDPROC(atomic_##op);                                           \
 
-ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
-	BACKOFF_SETUP(%o2)
-1:	lduw	[%o1], %g1
-	add	%g1, %o0, %g7
-	cas	[%o1], %g1, %g7
-	cmp	%g1, %g7
-	bne,pn	%icc, BACKOFF_LABEL(2f, 1b)
-	 add	%g1, %o0, %g1
-	retl
-	 sra	%g1, 0, %o0
-2:	BACKOFF_SPIN(%o2, %o3, 1b)
-ENDPROC(atomic_add_ret)
+#define ATOMIC_OP_RETURN(op)                                    \
+ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
+	BACKOFF_SETUP(%o2);                                     \
+1:	lduw	[%o1], %g1;                                     \
+	op	%g1, %o0, %g7;                                  \
+	cas	[%o1], %g1, %g7;                                \
+	cmp	%g1, %g7;                                       \
+	bne,pn	%icc, BACKOFF_LABEL(2f, 1b);                    \
+	 op	%g1, %o0, %g1;                                  \
+	retl;                                                   \
+	 sra	%g1, 0, %o0;                                    \
+2:	BACKOFF_SPIN(%o2, %o3, 1b);                             \
+ENDPROC(atomic_##op##_return);
 
-ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
-	BACKOFF_SETUP(%o2)
-1:	lduw	[%o1], %g1
-	sub	%g1, %o0, %g7
-	cas	[%o1], %g1, %g7
-	cmp	%g1, %g7
-	bne,pn	%icc, BACKOFF_LABEL(2f, 1b)
-	 sub	%g1, %o0, %g1
-	retl
-	 sra	%g1, 0, %o0
-2:	BACKOFF_SPIN(%o2, %o3, 1b)
-ENDPROC(atomic_sub_ret)
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
 
-ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
-	BACKOFF_SETUP(%o2)
-1:	ldx	[%o1], %g1
-	add	%g1, %o0, %g7
-	casx	[%o1], %g1, %g7
-	cmp	%g1, %g7
-	bne,pn	%xcc, BACKOFF_LABEL(2f, 1b)
-	 nop
-	retl
-	 nop
-2:	BACKOFF_SPIN(%o2, %o3, 1b)
-ENDPROC(atomic64_add)
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
 
-ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
-	BACKOFF_SETUP(%o2)
-1:	ldx	[%o1], %g1
-	sub	%g1, %o0, %g7
-	casx	[%o1], %g1, %g7
-	cmp	%g1, %g7
-	bne,pn	%xcc, BACKOFF_LABEL(2f, 1b)
-	 nop
-	retl
-	 nop
-2:	BACKOFF_SPIN(%o2, %o3, 1b)
-ENDPROC(atomic64_sub)
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
-ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
-	BACKOFF_SETUP(%o2)
-1:	ldx	[%o1], %g1
-	add	%g1, %o0, %g7
-	casx	[%o1], %g1, %g7
-	cmp	%g1, %g7
-	bne,pn	%xcc, BACKOFF_LABEL(2f, 1b)
-	 nop
-	retl
-	 add	%g1, %o0, %o0
-2:	BACKOFF_SPIN(%o2, %o3, 1b)
-ENDPROC(atomic64_add_ret)
+#define ATOMIC64_OP(op)                                         \
+ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */    \
+	BACKOFF_SETUP(%o2);                                     \
+1:	ldx	[%o1], %g1;                                     \
+	op	%g1, %o0, %g7;                                  \
+	casx	[%o1], %g1, %g7;                                \
+	cmp	%g1, %g7;                                       \
+	bne,pn	%xcc, BACKOFF_LABEL(2f, 1b);                    \
+	 nop;                                                   \
+	retl;                                                   \
+	 nop;                                                   \
+2:	BACKOFF_SPIN(%o2, %o3, 1b);                             \
+ENDPROC(atomic64_##op);                                         \
 
-ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
-	BACKOFF_SETUP(%o2)
-1:	ldx	[%o1], %g1
-	sub	%g1, %o0, %g7
-	casx	[%o1], %g1, %g7
-	cmp	%g1, %g7
-	bne,pn	%xcc, BACKOFF_LABEL(2f, 1b)
-	 nop
-	retl
-	 sub	%g1, %o0, %o0
-2:	BACKOFF_SPIN(%o2, %o3, 1b)
-ENDPROC(atomic64_sub_ret)
+#define ATOMIC64_OP_RETURN(op)                                  \
+ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
+	BACKOFF_SETUP(%o2);                                     \
+1:	ldx	[%o1], %g1;                                     \
+	op	%g1, %o0, %g7;                                  \
+	casx	[%o1], %g1, %g7;                                \
+	cmp	%g1, %g7;                                       \
+	bne,pn	%xcc, BACKOFF_LABEL(2f, 1b);                    \
+	 nop;                                                   \
+	retl;                                                   \
+	 op	%g1, %o0, %o0;                                  \
+2:	BACKOFF_SPIN(%o2, %o3, 1b);                             \
+ENDPROC(atomic64_##op##_return);
+
+#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
+
+ATOMIC64_OPS(add)
+ATOMIC64_OPS(sub)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
 
 ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
 	BACKOFF_SETUP(%o2)
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
index 323335b9cd2b..1d649a95660c 100644
--- a/arch/sparc/lib/ksyms.c
+++ b/arch/sparc/lib/ksyms.c
@@ -99,14 +99,23 @@ EXPORT_SYMBOL(___copy_in_user);
 EXPORT_SYMBOL(__clear_user);
 
 /* Atomic counter implementation. */
-EXPORT_SYMBOL(atomic_add);
-EXPORT_SYMBOL(atomic_add_ret);
-EXPORT_SYMBOL(atomic_sub);
-EXPORT_SYMBOL(atomic_sub_ret);
-EXPORT_SYMBOL(atomic64_add);
-EXPORT_SYMBOL(atomic64_add_ret);
-EXPORT_SYMBOL(atomic64_sub);
-EXPORT_SYMBOL(atomic64_sub_ret);
+#define ATOMIC_OP(op)                                           \
+EXPORT_SYMBOL(atomic_##op);                                     \
+EXPORT_SYMBOL(atomic64_##op);
+
+#define ATOMIC_OP_RETURN(op)                                    \
+EXPORT_SYMBOL(atomic_##op##_return);                            \
+EXPORT_SYMBOL(atomic64_##op##_return);
+
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
+
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
 EXPORT_SYMBOL(atomic64_dec_if_positive);
 
 /* Atomic bit operations. */
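
Note: expanded, the ATOMIC_OPS(add) and ATOMIC_OPS(sub) lines above export the same set of routines as before, just under the new *_return names. For example, ATOMIC_OPS(add) boils down to:

EXPORT_SYMBOL(atomic_add);
EXPORT_SYMBOL(atomic64_add);
EXPORT_SYMBOL(atomic_add_return);
EXPORT_SYMBOL(atomic64_add_return);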