path: root/arch/sparc
author     Linus Torvalds <torvalds@linux-foundation.org>   2014-10-13 09:48:00 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-10-13 09:48:00 -0400
commit     dbb885fecc1b1b35e93416bedd24d21bd20f60ed (patch)
tree       9aa92bcc4e3d3594eba0ba85d72b878d85f35a59 /arch/sparc
parent     d6dd50e07c5bec00db2005969b1a01f8ca3d25ef (diff)
parent     2291059c852706c6f5ffb400366042b7625066cd (diff)
Merge branch 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull arch atomic cleanups from Ingo Molnar:
 "This is a series kept separate from the main locking tree, which
  cleans up and improves various details in the atomics type handling:

   - Remove the unused atomic_or_long() method

   - Consolidate and compress atomic ops implementations between
     architectures, to reduce linecount and to make it easier to add
     new ops.

   - Rewrite generic atomic support to only require cmpxchg() from an
     architecture - generate all other methods from that"

* 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (23 commits)
  locking,arch: Use ACCESS_ONCE() instead of cast to volatile in atomic_read()
  locking, mips: Fix atomics
  locking, sparc64: Fix atomics
  locking,arch: Rewrite generic atomic support
  locking,arch,xtensa: Fold atomic_ops
  locking,arch,sparc: Fold atomic_ops
  locking,arch,sh: Fold atomic_ops
  locking,arch,powerpc: Fold atomic_ops
  locking,arch,parisc: Fold atomic_ops
  locking,arch,mn10300: Fold atomic_ops
  locking,arch,mips: Fold atomic_ops
  locking,arch,metag: Fold atomic_ops
  locking,arch,m68k: Fold atomic_ops
  locking,arch,m32r: Fold atomic_ops
  locking,arch,ia64: Fold atomic_ops
  locking,arch,hexagon: Fold atomic_ops
  locking,arch,cris: Fold atomic_ops
  locking,arch,avr32: Fold atomic_ops
  locking,arch,arm64: Fold atomic_ops
  locking,arch,arm: Fold atomic_ops
  ...
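The "generate all other methods from cmpxchg()" point boils down to a compare-and-swap retry loop. A minimal sketch, not code from this series (the helper name generic_atomic_add_return is made up for illustration; atomic_read() and atomic_cmpxchg() are the usual kernel primitives):

static inline int generic_atomic_add_return(int i, atomic_t *v)
{
	int old, new;

	/* Re-read and retry until the compare-and-swap publishes our update. */
	do {
		old = atomic_read(v);
		new = old + i;
	} while (atomic_cmpxchg(v, old, new) != old);

	return new;
}

Every other arithmetic op (sub, inc, dec and their _return forms) can be generated the same way by changing only the "old + i" expression, which is what makes the ATOMIC_OP()-style folding below possible.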
Diffstat (limited to 'arch/sparc')
-rw-r--r--  arch/sparc/include/asm/atomic_32.h |  19
-rw-r--r--  arch/sparc/include/asm/atomic_64.h |  49
-rw-r--r--  arch/sparc/kernel/smp_64.c         |   2
-rw-r--r--  arch/sparc/lib/atomic32.c          |  29
-rw-r--r--  arch/sparc/lib/atomic_64.S         | 163
-rw-r--r--  arch/sparc/lib/ksyms.c             |  25
6 files changed, 136 insertions(+), 151 deletions(-)
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index 7aed2be45b44..765c1776ec9f 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -20,23 +20,22 @@
 
 #define ATOMIC_INIT(i)  { (i) }
 
-int __atomic_add_return(int, atomic_t *);
+int atomic_add_return(int, atomic_t *);
 int atomic_cmpxchg(atomic_t *, int, int);
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 int __atomic_add_unless(atomic_t *, int, int);
 void atomic_set(atomic_t *, int);
 
-#define atomic_read(v)          (*(volatile int *)&(v)->counter)
+#define atomic_read(v)          ACCESS_ONCE((v)->counter)
 
-#define atomic_add(i, v)        ((void)__atomic_add_return( (int)(i), (v)))
-#define atomic_sub(i, v)        ((void)__atomic_add_return(-(int)(i), (v)))
-#define atomic_inc(v)           ((void)__atomic_add_return(        1, (v)))
-#define atomic_dec(v)           ((void)__atomic_add_return(       -1, (v)))
+#define atomic_add(i, v)        ((void)atomic_add_return( (int)(i), (v)))
+#define atomic_sub(i, v)        ((void)atomic_add_return(-(int)(i), (v)))
+#define atomic_inc(v)           ((void)atomic_add_return(        1, (v)))
+#define atomic_dec(v)           ((void)atomic_add_return(       -1, (v)))
 
-#define atomic_add_return(i, v) (__atomic_add_return( (int)(i), (v)))
-#define atomic_sub_return(i, v) (__atomic_add_return(-(int)(i), (v)))
-#define atomic_inc_return(v)    (__atomic_add_return(        1, (v)))
-#define atomic_dec_return(v)    (__atomic_add_return(       -1, (v)))
+#define atomic_sub_return(i, v) (atomic_add_return(-(int)(i), (v)))
+#define atomic_inc_return(v)    (atomic_add_return(        1, (v)))
+#define atomic_dec_return(v)    (atomic_add_return(       -1, (v)))
 
 #define atomic_add_negative(a, v)       (atomic_add_return((a), (v)) < 0)
 
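Worked expansion (illustration only, not part of the patch): after this change every sparc32 arithmetic atomic funnels into the single locked atomic_add_return() routine, for example

	atomic_inc(v);           /* -> (void)atomic_add_return(1, (v))        */
	atomic_sub_return(i, v); /* ->       atomic_add_return(-(int)(i), (v)) */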
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index bb894c8bec56..4082749913ce 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -14,33 +14,34 @@
 #define ATOMIC_INIT(i)      { (i) }
 #define ATOMIC64_INIT(i)    { (i) }
 
-#define atomic_read(v)      (*(volatile int *)&(v)->counter)
-#define atomic64_read(v)    (*(volatile long *)&(v)->counter)
+#define atomic_read(v)      ACCESS_ONCE((v)->counter)
+#define atomic64_read(v)    ACCESS_ONCE((v)->counter)
 
 #define atomic_set(v, i)    (((v)->counter) = i)
 #define atomic64_set(v, i)  (((v)->counter) = i)
 
-void atomic_add(int, atomic_t *);
-void atomic64_add(long, atomic64_t *);
-void atomic_sub(int, atomic_t *);
-void atomic64_sub(long, atomic64_t *);
+#define ATOMIC_OP(op)						\
+void atomic_##op(int, atomic_t *);				\
+void atomic64_##op(long, atomic64_t *);
 
-int atomic_add_ret(int, atomic_t *);
-long atomic64_add_ret(long, atomic64_t *);
-int atomic_sub_ret(int, atomic_t *);
-long atomic64_sub_ret(long, atomic64_t *);
+#define ATOMIC_OP_RETURN(op)					\
+int atomic_##op##_return(int, atomic_t *);			\
+long atomic64_##op##_return(long, atomic64_t *);
 
-#define atomic_dec_return(v) atomic_sub_ret(1, v)
-#define atomic64_dec_return(v) atomic64_sub_ret(1, v)
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
 
-#define atomic_inc_return(v) atomic_add_ret(1, v)
-#define atomic64_inc_return(v) atomic64_add_ret(1, v)
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
 
-#define atomic_sub_return(i, v) atomic_sub_ret(i, v)
-#define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
-#define atomic_add_return(i, v) atomic_add_ret(i, v)
-#define atomic64_add_return(i, v) atomic64_add_ret(i, v)
+#define atomic_dec_return(v)   atomic_sub_return(1, v)
+#define atomic64_dec_return(v) atomic64_sub_return(1, v)
+
+#define atomic_inc_return(v)   atomic_add_return(1, v)
+#define atomic64_inc_return(v) atomic64_add_return(1, v)
 
 /*
  * atomic_inc_and_test - increment and test
@@ -53,11 +54,11 @@ long atomic64_sub_ret(long, atomic64_t *);
 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
 
-#define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
-#define atomic64_sub_and_test(i, v) (atomic64_sub_ret(i, v) == 0)
+#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+#define atomic64_sub_and_test(i, v) (atomic64_sub_return(i, v) == 0)
 
-#define atomic_dec_and_test(v) (atomic_sub_ret(1, v) == 0)
-#define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
+#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
+#define atomic64_dec_and_test(v) (atomic64_sub_return(1, v) == 0)
 
 #define atomic_inc(v) atomic_add(1, v)
 #define atomic64_inc(v) atomic64_add(1, v)
@@ -65,8 +66,8 @@ long atomic64_sub_ret(long, atomic64_t *);
 #define atomic_dec(v) atomic_sub(1, v)
 #define atomic64_dec(v) atomic64_sub(1, v)
 
-#define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
-#define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
+#define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
+#define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
 
 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
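For reference (expansion shown for illustration, not text from the patch), ATOMIC_OPS(add) in this header expands to the four prototypes that the sparc64 assembly now provides:

void atomic_add(int, atomic_t *);
void atomic64_add(long, atomic64_t *);
int atomic_add_return(int, atomic_t *);
long atomic64_add_return(long, atomic64_t *);

ATOMIC_OPS(sub) produces the matching sub variants, so the old atomic_add_ret()/atomic_sub_ret() names disappear in favour of the standard *_return spelling.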
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index c9300bfaee5a..302c476413d5 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1138,7 +1138,7 @@ static unsigned long penguins_are_doing_time;
 
 void smp_capture(void)
 {
-	int result = atomic_add_ret(1, &smp_capture_depth);
+	int result = atomic_add_return(1, &smp_capture_depth);
 
 	if (result == 1) {
 		int ncpus = num_online_cpus();
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index 1d32b54089aa..a7c418ac26af 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -27,18 +27,23 @@ static DEFINE_SPINLOCK(dummy);
 
 #endif /* SMP */
 
-int __atomic_add_return(int i, atomic_t *v)
-{
-	int ret;
-	unsigned long flags;
-	spin_lock_irqsave(ATOMIC_HASH(v), flags);
-
-	ret = (v->counter += i);
-
-	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
-	return ret;
-}
-EXPORT_SYMBOL(__atomic_add_return);
+#define ATOMIC_OP(op, cop)					\
+int atomic_##op##_return(int i, atomic_t *v)			\
+{								\
+	int ret;						\
+	unsigned long flags;					\
+	spin_lock_irqsave(ATOMIC_HASH(v), flags);		\
+								\
+	ret = (v->counter cop i);				\
+								\
+	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);		\
+	return ret;						\
+}								\
+EXPORT_SYMBOL(atomic_##op##_return);
+
+ATOMIC_OP(add, +=)
+
+#undef ATOMIC_OP
 
 int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
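As a worked example (expansion only, not new code in the patch), ATOMIC_OP(add, +=) above regenerates exactly the routine that was removed, just under the new name:

int atomic_add_return(int i, atomic_t *v)
{
	int ret;
	unsigned long flags;
	spin_lock_irqsave(ATOMIC_HASH(v), flags);

	ret = (v->counter += i);

	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
	return ret;
}
EXPORT_SYMBOL(atomic_add_return);

so behaviour on sparc32 is unchanged; only the symbol loses its leading underscores.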
diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
index 85c233d0a340..05dac43907d1 100644
--- a/arch/sparc/lib/atomic_64.S
+++ b/arch/sparc/lib/atomic_64.S
@@ -14,109 +14,80 @@
  * memory barriers, and a second which returns
  * a value and does the barriers.
  */
-ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
-	BACKOFF_SETUP(%o2)
-1:	lduw	[%o1], %g1
-	add	%g1, %o0, %g7
-	cas	[%o1], %g1, %g7
-	cmp	%g1, %g7
-	bne,pn	%icc, BACKOFF_LABEL(2f, 1b)
-	 nop
-	retl
-	 nop
-2:	BACKOFF_SPIN(%o2, %o3, 1b)
-ENDPROC(atomic_add)
 
-ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
-	BACKOFF_SETUP(%o2)
-1:	lduw	[%o1], %g1
-	sub	%g1, %o0, %g7
-	cas	[%o1], %g1, %g7
-	cmp	%g1, %g7
-	bne,pn	%icc, BACKOFF_LABEL(2f, 1b)
-	 nop
-	retl
-	 nop
-2:	BACKOFF_SPIN(%o2, %o3, 1b)
-ENDPROC(atomic_sub)
+#define ATOMIC_OP(op)						\
+ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */	\
+	BACKOFF_SETUP(%o2);					\
+1:	lduw	[%o1], %g1;					\
+	op	%g1, %o0, %g7;					\
+	cas	[%o1], %g1, %g7;				\
+	cmp	%g1, %g7;					\
+	bne,pn	%icc, BACKOFF_LABEL(2f, 1b);			\
+	 nop;							\
+	retl;							\
+	 nop;							\
+2:	BACKOFF_SPIN(%o2, %o3, 1b);				\
+ENDPROC(atomic_##op);						\
 
-ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
-	BACKOFF_SETUP(%o2)
-1:	lduw	[%o1], %g1
-	add	%g1, %o0, %g7
-	cas	[%o1], %g1, %g7
-	cmp	%g1, %g7
-	bne,pn	%icc, BACKOFF_LABEL(2f, 1b)
-	 add	%g1, %o0, %g1
-	retl
-	 sra	%g1, 0, %o0
-2:	BACKOFF_SPIN(%o2, %o3, 1b)
-ENDPROC(atomic_add_ret)
+#define ATOMIC_OP_RETURN(op)					\
+ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
+	BACKOFF_SETUP(%o2);					\
+1:	lduw	[%o1], %g1;					\
+	op	%g1, %o0, %g7;					\
+	cas	[%o1], %g1, %g7;				\
+	cmp	%g1, %g7;					\
+	bne,pn	%icc, BACKOFF_LABEL(2f, 1b);			\
+	 op	%g1, %o0, %g1;					\
+	retl;							\
+	 sra	%g1, 0, %o0;					\
+2:	BACKOFF_SPIN(%o2, %o3, 1b);				\
+ENDPROC(atomic_##op##_return);
 
-ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
-	BACKOFF_SETUP(%o2)
-1:	lduw	[%o1], %g1
-	sub	%g1, %o0, %g7
-	cas	[%o1], %g1, %g7
-	cmp	%g1, %g7
-	bne,pn	%icc, BACKOFF_LABEL(2f, 1b)
-	 sub	%g1, %o0, %g1
-	retl
-	 sra	%g1, 0, %o0
-2:	BACKOFF_SPIN(%o2, %o3, 1b)
-ENDPROC(atomic_sub_ret)
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
 
-ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
-	BACKOFF_SETUP(%o2)
-1:	ldx	[%o1], %g1
-	add	%g1, %o0, %g7
-	casx	[%o1], %g1, %g7
-	cmp	%g1, %g7
-	bne,pn	%xcc, BACKOFF_LABEL(2f, 1b)
-	 nop
-	retl
-	 nop
-2:	BACKOFF_SPIN(%o2, %o3, 1b)
-ENDPROC(atomic64_add)
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
 
-ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
-	BACKOFF_SETUP(%o2)
-1:	ldx	[%o1], %g1
-	sub	%g1, %o0, %g7
-	casx	[%o1], %g1, %g7
-	cmp	%g1, %g7
-	bne,pn	%xcc, BACKOFF_LABEL(2f, 1b)
-	 nop
-	retl
-	 nop
-2:	BACKOFF_SPIN(%o2, %o3, 1b)
-ENDPROC(atomic64_sub)
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
-ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
-	BACKOFF_SETUP(%o2)
-1:	ldx	[%o1], %g1
-	add	%g1, %o0, %g7
-	casx	[%o1], %g1, %g7
-	cmp	%g1, %g7
-	bne,pn	%xcc, BACKOFF_LABEL(2f, 1b)
-	 nop
-	retl
-	 add	%g1, %o0, %o0
-2:	BACKOFF_SPIN(%o2, %o3, 1b)
-ENDPROC(atomic64_add_ret)
+#define ATOMIC64_OP(op)						\
+ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */	\
+	BACKOFF_SETUP(%o2);					\
+1:	ldx	[%o1], %g1;					\
+	op	%g1, %o0, %g7;					\
+	casx	[%o1], %g1, %g7;				\
+	cmp	%g1, %g7;					\
+	bne,pn	%xcc, BACKOFF_LABEL(2f, 1b);			\
+	 nop;							\
+	retl;							\
+	 nop;							\
+2:	BACKOFF_SPIN(%o2, %o3, 1b);				\
+ENDPROC(atomic64_##op);						\
 
-ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
-	BACKOFF_SETUP(%o2)
-1:	ldx	[%o1], %g1
-	sub	%g1, %o0, %g7
-	casx	[%o1], %g1, %g7
-	cmp	%g1, %g7
-	bne,pn	%xcc, BACKOFF_LABEL(2f, 1b)
-	 nop
-	retl
-	 sub	%g1, %o0, %o0
-2:	BACKOFF_SPIN(%o2, %o3, 1b)
-ENDPROC(atomic64_sub_ret)
+#define ATOMIC64_OP_RETURN(op)					\
+ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
+	BACKOFF_SETUP(%o2);					\
+1:	ldx	[%o1], %g1;					\
+	op	%g1, %o0, %g7;					\
+	casx	[%o1], %g1, %g7;				\
+	cmp	%g1, %g7;					\
+	bne,pn	%xcc, BACKOFF_LABEL(2f, 1b);			\
+	 nop;							\
+	retl;							\
+	 op	%g1, %o0, %o0;					\
+2:	BACKOFF_SPIN(%o2, %o3, 1b);				\
+ENDPROC(atomic64_##op##_return);
+
+#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
+
+ATOMIC64_OPS(add)
+ATOMIC64_OPS(sub)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
 
 ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
 	BACKOFF_SETUP(%o2)
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
index 323335b9cd2b..1d649a95660c 100644
--- a/arch/sparc/lib/ksyms.c
+++ b/arch/sparc/lib/ksyms.c
@@ -99,14 +99,23 @@ EXPORT_SYMBOL(___copy_in_user);
 EXPORT_SYMBOL(__clear_user);
 
 /* Atomic counter implementation. */
-EXPORT_SYMBOL(atomic_add);
-EXPORT_SYMBOL(atomic_add_ret);
-EXPORT_SYMBOL(atomic_sub);
-EXPORT_SYMBOL(atomic_sub_ret);
-EXPORT_SYMBOL(atomic64_add);
-EXPORT_SYMBOL(atomic64_add_ret);
-EXPORT_SYMBOL(atomic64_sub);
-EXPORT_SYMBOL(atomic64_sub_ret);
+#define ATOMIC_OP(op)						\
+EXPORT_SYMBOL(atomic_##op);					\
+EXPORT_SYMBOL(atomic64_##op);
+
+#define ATOMIC_OP_RETURN(op)					\
+EXPORT_SYMBOL(atomic_##op##_return);				\
+EXPORT_SYMBOL(atomic64_##op##_return);
+
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
+
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
 EXPORT_SYMBOL(atomic64_dec_if_positive);
 
 /* Atomic bit operations. */
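For reference (expansion for illustration, not patch text), ATOMIC_OPS(add) and ATOMIC_OPS(sub) in ksyms.c expand to the same eight exports the file used to list by hand, with the *_ret names replaced by *_return:

EXPORT_SYMBOL(atomic_add);
EXPORT_SYMBOL(atomic64_add);
EXPORT_SYMBOL(atomic_add_return);
EXPORT_SYMBOL(atomic64_add_return);
EXPORT_SYMBOL(atomic_sub);
EXPORT_SYMBOL(atomic64_sub);
EXPORT_SYMBOL(atomic_sub_return);
EXPORT_SYMBOL(atomic64_sub_return);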