author    David S. Miller <davem@sunset.davemloft.net>    2006-12-17 19:18:47 -0500
committer David S. Miller <davem@sunset.davemloft.net>    2006-12-17 19:18:47 -0500
commit    8a8b836b91aa170a383f2f360b73d3d23160d9d7 (patch)
tree      875a635f634a869b801c4efa8f145c5b7b7db8e4
parent    216da721b881838d639a3987bf8a825e6b4aacdd (diff)
[SPARC]: Make bitops use same spinlocks as atomics.
Recent workqueue changes basically make this a formal requirement.

Also, move atomic32.o from lib-y to obj-y since it exports symbols
to modules.

Signed-off-by: David S. Miller <davem@davemloft.net>
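Background for the change: sparc32 has only a byte test-and-set (ldstub), not a full compare-and-swap, so arch/sparc/lib/atomic32.c guards atomic_t updates with a small address-hashed table of spinlocks; this patch routes the bitops through that same table instead of a private lock. A minimal user-space sketch of the idea follows. The table size, the >>8 shift, and all names here are illustrative assumptions, with pthread mutexes standing in for kernel spinlocks.

#include <stdint.h>
#include <pthread.h>

/* Hashed lock table: atomic_t operations and bitops both take the
 * lock selected by the address they touch, so the two families
 * serialize against each other. */
#define HASH_SIZE 4     /* assumed small power of two */

static pthread_mutex_t hash_lock[HASH_SIZE] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

static pthread_mutex_t *lock_for(const void *addr)
{
        /* Shift past low-order bits, then mask to pick a slot. */
        return &hash_lock[((uintptr_t)addr >> 8) & (HASH_SIZE - 1)];
}

/* Test-and-set under the hashed lock, mirroring ___set_bit() in the
 * atomic32.c hunk below. */
static unsigned long hashed_test_and_set(unsigned long *addr,
                                         unsigned long mask)
{
        pthread_mutex_t *m = lock_for(addr);
        unsigned long old;

        pthread_mutex_lock(m);
        old = *addr;
        *addr = old | mask;
        pthread_mutex_unlock(m);

        return old & mask;      /* nonzero iff a bit was already set */
}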
-rw-r--r--  arch/sparc/kernel/sparc_ksyms.c    8
-rw-r--r--  arch/sparc/kernel/time.c           5
-rw-r--r--  arch/sparc/lib/Makefile            4
-rw-r--r--  arch/sparc/lib/atomic32.c         39
-rw-r--r--  arch/sparc/lib/bitops.S          109
-rw-r--r--  include/asm-sparc/bitops.h       100
6 files changed, 58 insertions(+), 207 deletions(-)
diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c
index 33dadd9f2871..d8e008a04e2b 100644
--- a/arch/sparc/kernel/sparc_ksyms.c
+++ b/arch/sparc/kernel/sparc_ksyms.c
@@ -83,9 +83,6 @@ extern int __divdi3(int, int);
 /* Private functions with odd calling conventions. */
 extern void ___atomic24_add(void);
 extern void ___atomic24_sub(void);
-extern void ___set_bit(void);
-extern void ___clear_bit(void);
-extern void ___change_bit(void);
 extern void ___rw_read_enter(void);
 extern void ___rw_read_try(void);
 extern void ___rw_read_exit(void);
@@ -125,11 +122,6 @@ EXPORT_SYMBOL(pfn_base);
 EXPORT_SYMBOL(___atomic24_add);
 EXPORT_SYMBOL(___atomic24_sub);
 
-/* Bit operations. */
-EXPORT_SYMBOL(___set_bit);
-EXPORT_SYMBOL(___clear_bit);
-EXPORT_SYMBOL(___change_bit);
-
 /* Per-CPU information table */
 EXPORT_PER_CPU_SYMBOL(__cpu_data);
 
diff --git a/arch/sparc/kernel/time.c b/arch/sparc/kernel/time.c
index 6c7aa51b590f..2fcce000d877 100644
--- a/arch/sparc/kernel/time.c
+++ b/arch/sparc/kernel/time.c
@@ -78,7 +78,6 @@ unsigned long profile_pc(struct pt_regs *regs)
 	extern char __copy_user_begin[], __copy_user_end[];
 	extern char __atomic_begin[], __atomic_end[];
 	extern char __bzero_begin[], __bzero_end[];
-	extern char __bitops_begin[], __bitops_end[];
 
 	unsigned long pc = regs->pc;
 
@@ -88,9 +87,7 @@ unsigned long profile_pc(struct pt_regs *regs)
 	    (pc >= (unsigned long) __atomic_begin &&
 	     pc < (unsigned long) __atomic_end) ||
 	    (pc >= (unsigned long) __bzero_begin &&
-	     pc < (unsigned long) __bzero_end) ||
-	    (pc >= (unsigned long) __bitops_begin &&
-	     pc < (unsigned long) __bitops_end))
+	     pc < (unsigned long) __bzero_end))
 		pc = regs->u_regs[UREG_RETPC];
 	return pc;
 }
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index 5db7e1d85385..9ddc5b9ce3bd 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -7,7 +7,7 @@ EXTRA_AFLAGS := -ansi -DST_DIV0=0x02
 lib-y := mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o memcpy.o memset.o \
 	 strlen.o checksum.o blockops.o memscan.o memcmp.o strncmp.o \
 	 strncpy_from_user.o divdi3.o udivdi3.o strlen_user.o \
-	 copy_user.o locks.o atomic.o atomic32.o bitops.o \
+	 copy_user.o locks.o atomic.o \
 	 lshrdi3.o ashldi3.o rwsem.o muldi3.o bitext.o
 
-obj-y += iomap.o
+obj-y += iomap.o atomic32.o
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index de84f8534bac..53ddcd9d1e60 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -76,3 +76,42 @@ void atomic_set(atomic_t *v, int i)
 	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
 }
 EXPORT_SYMBOL(atomic_set);
+
+unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
+{
+	unsigned long old, flags;
+
+	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
+	old = *addr;
+	*addr = old | mask;
+	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
+
+	return old & mask;
+}
+EXPORT_SYMBOL(___set_bit);
+
+unsigned long ___clear_bit(unsigned long *addr, unsigned long mask)
+{
+	unsigned long old, flags;
+
+	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
+	old = *addr;
+	*addr = old & ~mask;
+	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
+
+	return old & mask;
+}
+EXPORT_SYMBOL(___clear_bit);
+
+unsigned long ___change_bit(unsigned long *addr, unsigned long mask)
+{
+	unsigned long old, flags;
+
+	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
+	old = *addr;
+	*addr = old ^ mask;
+	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
+
+	return old & mask;
+}
+EXPORT_SYMBOL(___change_bit);
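Note the shared return convention: each helper returns old & mask rather than void, which is what lets a single out-of-line function per operation back both the void wrappers and the test_and_* wrappers in bitops.h below. A single-threaded illustration of the convention (demo names are hypothetical; the real locking is deliberately elided):

#include <assert.h>

/* demo_set_bit(): same return convention as ___set_bit() above,
 * minus the hashed spinlock, for a single-threaded illustration. */
static unsigned long demo_set_bit(unsigned long *addr, unsigned long mask)
{
        unsigned long old = *addr;

        *addr = old | mask;
        return old & mask;      /* prior state of the targeted bits */
}

int main(void)
{
        unsigned long word = 0;

        assert(demo_set_bit(&word, 1UL << 3) == 0);     /* bit was clear */
        assert(demo_set_bit(&word, 1UL << 3) != 0);     /* bit already set */
        return 0;
}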
diff --git a/arch/sparc/lib/bitops.S b/arch/sparc/lib/bitops.S
deleted file mode 100644
index cb7fb66a40c8..000000000000
--- a/arch/sparc/lib/bitops.S
+++ /dev/null
@@ -1,109 +0,0 @@
-/* bitops.S: Low level assembler bit operations.
- *
- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
- */
-
-#include <asm/ptrace.h>
-#include <asm/psr.h>
-
-	.text
-	.align	4
-
-	.globl	__bitops_begin
-__bitops_begin:
-
-	/* Take bits in %g2 and set them in word at %g1,
-	 * return whether bits were set in original value
-	 * in %g2. %g4 holds value to restore into %o7
-	 * in delay slot of jmpl return, %g3 + %g5 + %g7 can be
-	 * used as temporaries and thus is considered clobbered
-	 * by all callers.
-	 */
-	.globl	___set_bit
-___set_bit:
-	rd	%psr, %g3
-	nop; nop; nop;
-	or	%g3, PSR_PIL, %g5
-	wr	%g5, 0x0, %psr
-	nop; nop; nop
-#ifdef CONFIG_SMP
-	set	bitops_spinlock, %g5
-2:	ldstub	[%g5], %g7	! Spin on the byte lock for SMP.
-	orcc	%g7, 0x0, %g0	! Did we get it?
-	bne	2b		! Nope...
-#endif
-	ld	[%g1], %g7
-	or	%g7, %g2, %g5
-	and	%g7, %g2, %g2
-#ifdef CONFIG_SMP
-	st	%g5, [%g1]
-	set	bitops_spinlock, %g5
-	stb	%g0, [%g5]
-#else
-	st	%g5, [%g1]
-#endif
-	wr	%g3, 0x0, %psr
-	nop; nop; nop
-	jmpl	%o7, %g0
-	 mov	%g4, %o7
-
-	/* Same as above, but clears the bits from %g2 instead. */
-	.globl	___clear_bit
-___clear_bit:
-	rd	%psr, %g3
-	nop; nop; nop
-	or	%g3, PSR_PIL, %g5
-	wr	%g5, 0x0, %psr
-	nop; nop; nop
-#ifdef CONFIG_SMP
-	set	bitops_spinlock, %g5
-2:	ldstub	[%g5], %g7	! Spin on the byte lock for SMP.
-	orcc	%g7, 0x0, %g0	! Did we get it?
-	bne	2b		! Nope...
-#endif
-	ld	[%g1], %g7
-	andn	%g7, %g2, %g5
-	and	%g7, %g2, %g2
-#ifdef CONFIG_SMP
-	st	%g5, [%g1]
-	set	bitops_spinlock, %g5
-	stb	%g0, [%g5]
-#else
-	st	%g5, [%g1]
-#endif
-	wr	%g3, 0x0, %psr
-	nop; nop; nop
-	jmpl	%o7, %g0
-	 mov	%g4, %o7
-
-	/* Same thing again, but this time toggles the bits from %g2. */
-	.globl	___change_bit
-___change_bit:
-	rd	%psr, %g3
-	nop; nop; nop
-	or	%g3, PSR_PIL, %g5
-	wr	%g5, 0x0, %psr
-	nop; nop; nop
-#ifdef CONFIG_SMP
-	set	bitops_spinlock, %g5
-2:	ldstub	[%g5], %g7	! Spin on the byte lock for SMP.
-	orcc	%g7, 0x0, %g0	! Did we get it?
-	bne	2b		! Nope...
-#endif
-	ld	[%g1], %g7
-	xor	%g7, %g2, %g5
-	and	%g7, %g2, %g2
-#ifdef CONFIG_SMP
-	st	%g5, [%g1]
-	set	bitops_spinlock, %g5
-	stb	%g0, [%g5]
-#else
-	st	%g5, [%g1]
-#endif
-	wr	%g3, 0x0, %psr
-	nop; nop; nop
-	jmpl	%o7, %g0
-	 mov	%g4, %o7
-
-	.globl	__bitops_end
-__bitops_end:
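For contrast with the C replacement: the assembly just removed raised PSR_PIL to mask interrupts and, under CONFIG_SMP, serialized every bit operation on every address through one global byte lock (bitops_spinlock) taken with ldstub. A rough user-space analogue of that byte lock, with a GCC __atomic builtin standing in for ldstub (an illustrative sketch, not kernel code):

/* One global byte lock, as bitops_spinlock was: every bit operation
 * funnels through this single byte. */
static volatile unsigned char bitops_lock;

static void byte_lock(void)
{
        /* __atomic_exchange_n plays the role of SPARC ldstub:
         * atomically store 0xff and hand back the previous value. */
        while (__atomic_exchange_n(&bitops_lock, 0xff, __ATOMIC_ACQUIRE))
                ;       /* spin until the old value was 0 (lock free) */
}

static void byte_unlock(void)
{
        __atomic_store_n(&bitops_lock, 0, __ATOMIC_RELEASE);
}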
diff --git a/include/asm-sparc/bitops.h b/include/asm-sparc/bitops.h
index 04aa3318f76a..329e696e7751 100644
--- a/include/asm-sparc/bitops.h
+++ b/include/asm-sparc/bitops.h
@@ -14,6 +14,10 @@
 
 #ifdef __KERNEL__
 
+extern unsigned long ___set_bit(unsigned long *addr, unsigned long mask);
+extern unsigned long ___clear_bit(unsigned long *addr, unsigned long mask);
+extern unsigned long ___change_bit(unsigned long *addr, unsigned long mask);
+
 /*
  * Set bit 'nr' in 32-bit quantity at address 'addr' where bit '0'
  * is in the highest of the four bytes and bit '31' is the high bit
@@ -22,134 +26,62 @@
  */
 static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	register unsigned long mask asm("g2");
-	register unsigned long *ADDR asm("g1");
-	register int tmp1 asm("g3");
-	register int tmp2 asm("g4");
-	register int tmp3 asm("g5");
-	register int tmp4 asm("g7");
+	unsigned long *ADDR, mask;
 
 	ADDR = ((unsigned long *) addr) + (nr >> 5);
 	mask = 1 << (nr & 31);
 
-	__asm__ __volatile__(
-	"mov	%%o7, %%g4\n\t"
-	"call	___set_bit\n\t"
-	" add	%%o7, 8, %%o7\n"
-	: "=&r" (mask), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3), "=r" (tmp4)
-	: "0" (mask), "r" (ADDR)
-	: "memory", "cc");
-
-	return mask != 0;
+	return ___set_bit(ADDR, mask) != 0;
 }
 
 static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	register unsigned long mask asm("g2");
-	register unsigned long *ADDR asm("g1");
-	register int tmp1 asm("g3");
-	register int tmp2 asm("g4");
-	register int tmp3 asm("g5");
-	register int tmp4 asm("g7");
+	unsigned long *ADDR, mask;
 
 	ADDR = ((unsigned long *) addr) + (nr >> 5);
 	mask = 1 << (nr & 31);
 
-	__asm__ __volatile__(
-	"mov	%%o7, %%g4\n\t"
-	"call	___set_bit\n\t"
-	" add	%%o7, 8, %%o7\n"
-	: "=&r" (mask), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3), "=r" (tmp4)
-	: "0" (mask), "r" (ADDR)
-	: "memory", "cc");
+	(void) ___set_bit(ADDR, mask);
 }
 
 static inline int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	register unsigned long mask asm("g2");
-	register unsigned long *ADDR asm("g1");
-	register int tmp1 asm("g3");
-	register int tmp2 asm("g4");
-	register int tmp3 asm("g5");
-	register int tmp4 asm("g7");
+	unsigned long *ADDR, mask;
 
 	ADDR = ((unsigned long *) addr) + (nr >> 5);
 	mask = 1 << (nr & 31);
 
-	__asm__ __volatile__(
-	"mov	%%o7, %%g4\n\t"
-	"call	___clear_bit\n\t"
-	" add	%%o7, 8, %%o7\n"
-	: "=&r" (mask), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3), "=r" (tmp4)
-	: "0" (mask), "r" (ADDR)
-	: "memory", "cc");
-
-	return mask != 0;
+	return ___clear_bit(ADDR, mask) != 0;
 }
 
 static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	register unsigned long mask asm("g2");
-	register unsigned long *ADDR asm("g1");
-	register int tmp1 asm("g3");
-	register int tmp2 asm("g4");
-	register int tmp3 asm("g5");
-	register int tmp4 asm("g7");
+	unsigned long *ADDR, mask;
 
 	ADDR = ((unsigned long *) addr) + (nr >> 5);
 	mask = 1 << (nr & 31);
 
-	__asm__ __volatile__(
-	"mov	%%o7, %%g4\n\t"
-	"call	___clear_bit\n\t"
-	" add	%%o7, 8, %%o7\n"
-	: "=&r" (mask), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3), "=r" (tmp4)
-	: "0" (mask), "r" (ADDR)
-	: "memory", "cc");
+	(void) ___clear_bit(ADDR, mask);
 }
 
 static inline int test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	register unsigned long mask asm("g2");
-	register unsigned long *ADDR asm("g1");
-	register int tmp1 asm("g3");
-	register int tmp2 asm("g4");
-	register int tmp3 asm("g5");
-	register int tmp4 asm("g7");
+	unsigned long *ADDR, mask;
 
 	ADDR = ((unsigned long *) addr) + (nr >> 5);
 	mask = 1 << (nr & 31);
 
-	__asm__ __volatile__(
-	"mov	%%o7, %%g4\n\t"
-	"call	___change_bit\n\t"
-	" add	%%o7, 8, %%o7\n"
-	: "=&r" (mask), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3), "=r" (tmp4)
-	: "0" (mask), "r" (ADDR)
-	: "memory", "cc");
-
-	return mask != 0;
+	return ___change_bit(ADDR, mask) != 0;
 }
 
 static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	register unsigned long mask asm("g2");
-	register unsigned long *ADDR asm("g1");
-	register int tmp1 asm("g3");
-	register int tmp2 asm("g4");
-	register int tmp3 asm("g5");
-	register int tmp4 asm("g7");
+	unsigned long *ADDR, mask;
 
 	ADDR = ((unsigned long *) addr) + (nr >> 5);
 	mask = 1 << (nr & 31);
 
-	__asm__ __volatile__(
-	"mov	%%o7, %%g4\n\t"
-	"call	___change_bit\n\t"
-	" add	%%o7, 8, %%o7\n"
-	: "=&r" (mask), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3), "=r" (tmp4)
-	: "0" (mask), "r" (ADDR)
-	: "memory", "cc");
+	(void) ___change_bit(ADDR, mask);
 }
 
 #include <asm-generic/bitops/non-atomic.h>
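The surviving wrappers all share one pattern: split the flat bit number nr into a word index (nr >> 5) and an in-word mask (1 << (nr & 31)), then call the out-of-line helper. A self-contained user-space sketch of that decomposition (demo names are hypothetical; the real helpers' locking is elided):

#include <assert.h>
#include <stdint.h>

/* Split nr across an array of 32-bit words, exactly as the
 * wrappers above do before calling ___set_bit(). */
static int demo_test_and_set_bit(unsigned long nr, uint32_t *addr)
{
        uint32_t *word = addr + (nr >> 5);      /* which word */
        uint32_t mask = (uint32_t)1 << (nr & 31); /* which bit in it */
        uint32_t old = *word;

        *word = old | mask;
        return (old & mask) != 0;
}

int main(void)
{
        uint32_t bitmap[4] = { 0 };

        assert(demo_test_and_set_bit(70, bitmap) == 0); /* was clear */
        assert(demo_test_and_set_bit(70, bitmap) == 1); /* already set */
        assert(bitmap[2] == (uint32_t)1 << 6);          /* 70 = 2*32 + 6 */
        return 0;
}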