path: root/arch/sparc
author		Sam Ravnborg <sam@ravnborg.org>	2011-12-26 14:57:22 -0500
committer	David S. Miller <davem@davemloft.net>	2011-12-27 14:11:40 -0500
commit		348738afe530cc3a7306bbd3d1ccd322f9638f32 (patch)
tree		7ae88b2743aa8d1c0b5633b43877bd6aeecfe0c8 /arch/sparc
parent		371de6e4e0042adf4f9b54c414154f57414ddd37 (diff)
sparc32: drop unused atomic24 support
atomic24 support was used to implement semaphores in the past, but is no longer used.

Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc')
-rw-r--r--	arch/sparc/include/asm/atomic_32.h	100
-rw-r--r--	arch/sparc/lib/atomic_32.S	 55
-rw-r--r--	arch/sparc/lib/ksyms.c	  6
3 files changed, 0 insertions, 161 deletions
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index 5c3c8b69884d..534832f5b353 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -52,106 +52,6 @@ extern void atomic_set(atomic_t *, int);
 #define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
 
-
-/* This is the old 24-bit implementation. It's still used internally
- * by some sparc-specific code, notably the semaphore implementation.
- */
-typedef struct { volatile int counter; } atomic24_t;
-
-#ifndef CONFIG_SMP
-
-#define ATOMIC24_INIT(i)	{ (i) }
-#define atomic24_read(v)	((v)->counter)
-#define atomic24_set(v, i)	(((v)->counter) = i)
-
-#else
-/* We do the bulk of the actual work out of line in two common
- * routines in assembler, see arch/sparc/lib/atomic.S for the
- * "fun" details.
- *
- * For SMP the trick is you embed the spin lock byte within
- * the word, use the low byte so signedness is easily retained
- * via a quick arithmetic shift. It looks like this:
- *
- *	----------------------------------------
- *	| signed 24-bit counter value |  lock  |  atomic_t
- *	----------------------------------------
- *	 31                          8 7      0
- */
-
-#define ATOMIC24_INIT(i)	{ ((i) << 8) }
-
-static inline int atomic24_read(const atomic24_t *v)
-{
-	int ret = v->counter;
-
-	while(ret & 0xff)
-		ret = v->counter;
-
-	return ret >> 8;
-}
-
-#define atomic24_set(v, i)	(((v)->counter) = ((i) << 8))
-#endif
-
-static inline int __atomic24_add(int i, atomic24_t *v)
-{
-	register volatile int *ptr asm("g1");
-	register int increment asm("g2");
-	register int tmp1 asm("g3");
-	register int tmp2 asm("g4");
-	register int tmp3 asm("g7");
-
-	ptr = &v->counter;
-	increment = i;
-
-	__asm__ __volatile__(
-	"mov	%%o7, %%g4\n\t"
-	"call	___atomic24_add\n\t"
-	" add	%%o7, 8, %%o7\n"
-	: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
-	: "0" (increment), "r" (ptr)
-	: "memory", "cc");
-
-	return increment;
-}
-
-static inline int __atomic24_sub(int i, atomic24_t *v)
-{
-	register volatile int *ptr asm("g1");
-	register int increment asm("g2");
-	register int tmp1 asm("g3");
-	register int tmp2 asm("g4");
-	register int tmp3 asm("g7");
-
-	ptr = &v->counter;
-	increment = i;
-
-	__asm__ __volatile__(
-	"mov	%%o7, %%g4\n\t"
-	"call	___atomic24_sub\n\t"
-	" add	%%o7, 8, %%o7\n"
-	: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
-	: "0" (increment), "r" (ptr)
-	: "memory", "cc");
-
-	return increment;
-}
-
-#define atomic24_add(i, v) ((void)__atomic24_add((i), (v)))
-#define atomic24_sub(i, v) ((void)__atomic24_sub((i), (v)))
-
-#define atomic24_dec_return(v) __atomic24_sub(1, (v))
-#define atomic24_inc_return(v) __atomic24_add(1, (v))
-
-#define atomic24_sub_and_test(i, v) (__atomic24_sub((i), (v)) == 0)
-#define atomic24_dec_and_test(v) (__atomic24_sub(1, (v)) == 0)
-
-#define atomic24_inc(v) ((void)__atomic24_add(1, (v)))
-#define atomic24_dec(v) ((void)__atomic24_sub(1, (v)))
-
-#define atomic24_add_negative(i, v) (__atomic24_add((i), (v)) < 0)
-
 /* Atomic operations are already serializing */
 #define smp_mb__before_atomic_dec()	barrier()
 #define smp_mb__after_atomic_dec()	barrier()
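
The removed header comment above describes packing a signed 24-bit counter (bits 31..8) and a one-byte spin lock (bits 7..0) into a single 32-bit word. As a rough illustration only, and not part of this patch, the encoding can be sketched in plain C as below; the helper names pack24, unpack24 and lock_is_held are invented for this sketch and do not exist in the kernel.

/* Standalone sketch (not kernel code) of the removed atomic24_t layout. */
#include <stdio.h>

static int pack24(int counter)    { return counter << 8; }  /* as ATOMIC24_INIT(); lock byte = 0 */
static int unpack24(int word)     { return word >> 8; }     /* arithmetic shift keeps the sign, as gcc does on sparc */
static int lock_is_held(int word) { return (word & 0xff) != 0; }

int main(void)
{
	int v = pack24(-5);
	printf("value=%d locked=%d\n", unpack24(v), lock_is_held(v));  /* prints: value=-5 locked=0 */
	return 0;
}
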
diff --git a/arch/sparc/lib/atomic_32.S b/arch/sparc/lib/atomic_32.S
index 178cbb8ae1b9..eb6c7359cbd1 100644
--- a/arch/sparc/lib/atomic_32.S
+++ b/arch/sparc/lib/atomic_32.S
@@ -40,60 +40,5 @@ ___xchg32_sun4md:
 	 mov	%g4, %o7
 #endif
 
-	/* Read asm-sparc/atomic.h carefully to understand how this works for SMP.
-	 * Really, some things here for SMP are overly clever, go read the header.
-	 */
-	.globl	___atomic24_add
-___atomic24_add:
-	rd	%psr, %g3		! Keep the code small, old way was stupid
-	nop; nop; nop;			! Let the bits set
-	or	%g3, PSR_PIL, %g7	! Disable interrupts
-	wr	%g7, 0x0, %psr		! Set %psr
-	nop; nop; nop;			! Let the bits set
-#ifdef CONFIG_SMP
-1:	ldstub	[%g1 + 3], %g7		! Spin on the byte lock for SMP.
-	orcc	%g7, 0x0, %g0		! Did we get it?
-	bne	1b			! Nope...
-	 ld	[%g1], %g7		! Load locked atomic24_t
-	sra	%g7, 8, %g7		! Get signed 24-bit integer
-	add	%g7, %g2, %g2		! Add in argument
-	sll	%g2, 8, %g7		! Transpose back to atomic24_t
-	st	%g7, [%g1]		! Clever: This releases the lock as well.
-#else
-	ld	[%g1], %g7		! Load locked atomic24_t
-	add	%g7, %g2, %g2		! Add in argument
-	st	%g2, [%g1]		! Store it back
-#endif
-	wr	%g3, 0x0, %psr		! Restore original PSR_PIL
-	nop; nop; nop;			! Let the bits set
-	jmpl	%o7, %g0		! NOTE: not + 8, see callers in atomic.h
-	 mov	%g4, %o7		! Restore %o7
-
-	.globl	___atomic24_sub
-___atomic24_sub:
-	rd	%psr, %g3		! Keep the code small, old way was stupid
-	nop; nop; nop;			! Let the bits set
-	or	%g3, PSR_PIL, %g7	! Disable interrupts
-	wr	%g7, 0x0, %psr		! Set %psr
-	nop; nop; nop;			! Let the bits set
-#ifdef CONFIG_SMP
-1:	ldstub	[%g1 + 3], %g7		! Spin on the byte lock for SMP.
-	orcc	%g7, 0x0, %g0		! Did we get it?
-	bne	1b			! Nope...
-	 ld	[%g1], %g7		! Load locked atomic24_t
-	sra	%g7, 8, %g7		! Get signed 24-bit integer
-	sub	%g7, %g2, %g2		! Subtract argument
-	sll	%g2, 8, %g7		! Transpose back to atomic24_t
-	st	%g7, [%g1]		! Clever: This releases the lock as well
-#else
-	ld	[%g1], %g7		! Load locked atomic24_t
-	sub	%g7, %g2, %g2		! Subtract argument
-	st	%g2, [%g1]		! Store it back
-#endif
-	wr	%g3, 0x0, %psr		! Restore original PSR_PIL
-	nop; nop; nop;			! Let the bits set
-	jmpl	%o7, %g0		! NOTE: not + 8, see callers in atomic.h
-	 mov	%g4, %o7		! Restore %o7
-
 	.globl	__atomic_end
 __atomic_end:
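
For reference, the SMP path of the removed ___atomic24_add routine amounts to the following C-level sketch. It is illustrative only: test_and_set_byte() is a made-up, non-atomic stand-in for the sparc ldstub instruction, and the real routine also raises PSR_PIL to block interrupts around the whole sequence.

/* Illustrative C rendering of the removed SMP path; not a drop-in replacement. */
static int test_and_set_byte(volatile unsigned char *p)
{
	int old = *p;   /* NOTE: not actually atomic; ldstub is */
	*p = 0xff;
	return old;
}

static int atomic24_add_sketch(int i, volatile int *counter)
{
	volatile unsigned char *lock =
		(volatile unsigned char *)counter + 3;  /* low byte on big-endian sparc32 */
	int val;

	while (test_and_set_byte(lock))  /* ldstub [%g1 + 3], %g7: spin until we own the byte lock */
		;

	val = (*counter >> 8) + i;       /* sra: recover the signed 24-bit value, add the argument */
	*counter = val << 8;             /* sll + st: write back; the zero lock byte releases the lock */

	return val;
}
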
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
index 1b30bb3bfdb1..f73c2240fe60 100644
--- a/arch/sparc/lib/ksyms.c
+++ b/arch/sparc/lib/ksyms.c
@@ -62,8 +62,6 @@ extern void ___rw_read_enter(void);
 extern void ___rw_read_try(void);
 extern void ___rw_read_exit(void);
 extern void ___rw_write_enter(void);
-extern void ___atomic24_add(void);
-extern void ___atomic24_sub(void);
 
 /* Alias functions whose names begin with "." and export the aliases.
  * The module references will be fixed up by module_frob_arch_sections.
@@ -97,10 +95,6 @@ EXPORT_SYMBOL(___rw_read_exit);
 EXPORT_SYMBOL(___rw_write_enter);
 #endif
 
-/* Atomic operations. */
-EXPORT_SYMBOL(___atomic24_add);
-EXPORT_SYMBOL(___atomic24_sub);
-
 EXPORT_SYMBOL(__ashrdi3);
 EXPORT_SYMBOL(__ashldi3);
 EXPORT_SYMBOL(__lshrdi3);