author      Stuart Menefy <stuart.menefy@st.com>    2007-11-30 02:12:36 -0500
committer   Paul Mundt <lethal@linux-sh.org>        2008-01-27 23:18:58 -0500
commit      1efe4ce3ca126da77e450d5a83f7201949d76f62 (patch)
tree        fbae9902aa4103a9e86d06f841d580f24682e7b3 /include/asm-sh
parent      53ff09422e5e7a6d6198b767c8f494e43ec8e3ae (diff)
sh: GUSA atomic rollback support.
This implements kernel-level atomic rollback built on top of gUSA, as an
alternative, non-IRQ-based atomicity method. It is generally the faster
method on platforms that lack the LL/SC pairs used by SH-4A and later,
and it is only supportable on those legacy cores.
Signed-off-by: Stuart Menefy <stuart.menefy@st.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
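
Every GRB primitive added below is built from the same gUSA envelope. The
annotated skeleton here is an editorial sketch, not part of the diff: the
register conventions come from the comments in the patch itself, and the
rollback behaviour is a summary of how gUSA works.

    /*
     * gUSA rollback critical section, annotated (editorial sketch):
     *
     *      mova   1f,  r0     ! r0  = address of the end label "1:"
     *      mov    r15, r1     ! r1  = real stack pointer, saved
     *      mov    #-6, r15    ! LOGIN: r15 = -(region size in bytes);
     *                         !        a negative r15 marks "inside a region"
     *      mov.l  @rN, rM     ! load              \
     *      add    ..., rM     ! modify             > the 6-byte atomic body
     *      mov.l  rM,  @rN    ! store             /
     *   1: mov    r1,  r15    ! LOGOUT: restore the real sp
     *
     * If an interrupt or exception is taken while r15 is negative, the
     * exception path does not resume mid-sequence: it rolls the PC back to
     * the start of the region (derived from the end point in r0 and the
     * negative size in r15), so the whole load/modify/store is re-executed
     * from scratch. On a UP core this yields atomicity without masking
     * interrupts at all.
     */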
Diffstat (limited to 'include/asm-sh')
-rw-r--r--  include/asm-sh/atomic-grb.h   | 169
-rw-r--r--  include/asm-sh/atomic.h       |  10
-rw-r--r--  include/asm-sh/bitops-grb.h   | 169
-rw-r--r--  include/asm-sh/bitops-irq.h   |  91
-rw-r--r--  include/asm-sh/bitops.h       |  91
-rw-r--r--  include/asm-sh/cmpxchg-grb.h  |  70
-rw-r--r--  include/asm-sh/cmpxchg-irq.h  |  40
-rw-r--r--  include/asm-sh/system.h       |  40
8 files changed, 557 insertions(+), 123 deletions(-)
diff --git a/include/asm-sh/atomic-grb.h b/include/asm-sh/atomic-grb.h
new file mode 100644
index 000000000000..4c5b7dbfcedb
--- /dev/null
+++ b/include/asm-sh/atomic-grb.h
@@ -0,0 +1,169 @@
+#ifndef __ASM_SH_ATOMIC_GRB_H
+#define __ASM_SH_ATOMIC_GRB_H
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+	int tmp;
+
+	__asm__ __volatile__ (
+		"   .align 2              \n\t"
+		"   mova    1f,   r0      \n\t" /* r0 = end point */
+		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
+		"   mov    #-6,  r15      \n\t" /* LOGIN: r15 = size */
+		"   mov.l  @%1,   %0      \n\t" /* load old value */
+		"   add     %2,   %0      \n\t" /* add */
+		"   mov.l   %0,  @%1      \n\t" /* store new value */
+		"1: mov     r1,  r15      \n\t" /* LOGOUT */
+		: "=&r" (tmp),
+		  "+r"  (v)
+		: "r"   (i)
+		: "memory", "r0", "r1");
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+	int tmp;
+
+	__asm__ __volatile__ (
+		"   .align 2              \n\t"
+		"   mova    1f,   r0      \n\t" /* r0 = end point */
+		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
+		"   mov    #-6,  r15      \n\t" /* LOGIN: r15 = size */
+		"   mov.l  @%1,   %0      \n\t" /* load old value */
+		"   sub     %2,   %0      \n\t" /* sub */
+		"   mov.l   %0,  @%1      \n\t" /* store new value */
+		"1: mov     r1,  r15      \n\t" /* LOGOUT */
+		: "=&r" (tmp),
+		  "+r"  (v)
+		: "r"   (i)
+		: "memory", "r0", "r1");
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	int tmp;
+
+	__asm__ __volatile__ (
+		"   .align 2              \n\t"
+		"   mova    1f,   r0      \n\t" /* r0 = end point */
+		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
+		"   mov    #-6,  r15      \n\t" /* LOGIN: r15 = size */
+		"   mov.l  @%1,   %0      \n\t" /* load old value */
+		"   add     %2,   %0      \n\t" /* add */
+		"   mov.l   %0,  @%1      \n\t" /* store new value */
+		"1: mov     r1,  r15      \n\t" /* LOGOUT */
+		: "=&r" (tmp),
+		  "+r"  (v)
+		: "r"   (i)
+		: "memory", "r0", "r1");
+
+	return tmp;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+	int tmp;
+
+	__asm__ __volatile__ (
+		"   .align 2              \n\t"
+		"   mova    1f,   r0      \n\t" /* r0 = end point */
+		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
+		"   mov    #-6,  r15      \n\t" /* LOGIN: r15 = size */
+		"   mov.l  @%1,   %0      \n\t" /* load old value */
+		"   sub     %2,   %0      \n\t" /* sub */
+		"   mov.l   %0,  @%1      \n\t" /* store new value */
+		"1: mov     r1,  r15      \n\t" /* LOGOUT */
+		: "=&r" (tmp),
+		  "+r"  (v)
+		: "r"   (i)
+		: "memory", "r0", "r1");
+
+	return tmp;
+}
+
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+	int tmp;
+	unsigned int _mask = ~mask;
+
+	__asm__ __volatile__ (
+		"   .align 2              \n\t"
+		"   mova    1f,   r0      \n\t" /* r0 = end point */
+		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
+		"   mov    #-6,  r15      \n\t" /* LOGIN: r15 = size */
+		"   mov.l  @%1,   %0      \n\t" /* load old value */
+		"   and     %2,   %0      \n\t" /* and */
+		"   mov.l   %0,  @%1      \n\t" /* store new value */
+		"1: mov     r1,  r15      \n\t" /* LOGOUT */
+		: "=&r" (tmp),
+		  "+r"  (v)
+		: "r"   (_mask)
+		: "memory", "r0", "r1");
+}
+
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+	int tmp;
+
+	__asm__ __volatile__ (
+		"   .align 2              \n\t"
+		"   mova    1f,   r0      \n\t" /* r0 = end point */
+		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
+		"   mov    #-6,  r15      \n\t" /* LOGIN: r15 = size */
+		"   mov.l  @%1,   %0      \n\t" /* load old value */
+		"   or      %2,   %0      \n\t" /* or */
+		"   mov.l   %0,  @%1      \n\t" /* store new value */
+		"1: mov     r1,  r15      \n\t" /* LOGOUT */
+		: "=&r" (tmp),
+		  "+r"  (v)
+		: "r"   (mask)
+		: "memory", "r0", "r1");
+}
+
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	int ret;
+
+	__asm__ __volatile__ (
+		"   .align 2              \n\t"
+		"   mova    1f,   r0      \n\t"
+		"   nop                   \n\t"
+		"   mov    r15,   r1      \n\t"
+		"   mov    #-8,  r15      \n\t"
+		"   mov.l  @%1,   %0      \n\t"
+		"   cmp/eq  %2,   %0      \n\t"
+		"   bf      1f            \n\t"
+		"   mov.l   %3,  @%1      \n\t"
+		"1: mov     r1,  r15      \n\t"
+		: "=&r" (ret)
+		: "r" (v), "r" (old), "r" (new)
+		: "memory", "r0", "r1", "t");
+
+	return ret;
+}
+
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int ret;
+	unsigned long tmp;
+
+	__asm__ __volatile__ (
+		"   .align 2              \n\t"
+		"   mova    1f,   r0      \n\t"
+		"   nop                   \n\t"
+		"   mov    r15,   r1      \n\t"
+		"   mov   #-12,  r15      \n\t"
+		"   mov.l  @%2,   %1      \n\t"
+		"   mov     %1,   %0      \n\t"
+		"   cmp/eq  %4,   %0      \n\t"
+		"   bt/s    1f            \n\t"
+		"   add     %3,   %1      \n\t"
+		"   mov.l   %1,  @%2      \n\t"
+		"1: mov     r1,  r15      \n\t"
+		: "=&r" (ret), "=&r" (tmp)
+		: "r" (v), "r" (a), "r" (u)
+		: "memory", "r0", "r1", "t");
+
+	return ret != u;
+}
+#endif /* __ASM_SH_ATOMIC_GRB_H */
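
None of this changes the consumer-facing atomic_t API: whichever backend is
configured, callers look the same. A minimal usage sketch (illustrative names,
not from the patch):

    #include <asm/atomic.h>

    static atomic_t refs = ATOMIC_INIT(1);

    /* Take a reference only while the object is still live (count != 0). */
    static int obj_tryget(void)
    {
            return atomic_add_unless(&refs, 1, 0);  /* 0: already dead */
    }

    /* Drop a reference; nonzero means we just dropped the last one. */
    static int obj_put(void)
    {
            return atomic_sub_return(1, &refs) == 0;
    }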
diff --git a/include/asm-sh/atomic.h b/include/asm-sh/atomic.h
index e12570b9339d..c043ef003028 100644
--- a/include/asm-sh/atomic.h
+++ b/include/asm-sh/atomic.h
@@ -17,7 +17,9 @@ typedef struct { volatile int counter; } atomic_t;
 #include <linux/compiler.h>
 #include <asm/system.h>
 
-#ifdef CONFIG_CPU_SH4A
+#if defined(CONFIG_GUSA_RB)
+#include <asm/atomic-grb.h>
+#elif defined(CONFIG_CPU_SH4A)
 #include <asm/atomic-llsc.h>
 #else
 #include <asm/atomic-irq.h>
@@ -44,6 +46,7 @@ typedef struct { volatile int counter; } atomic_t;
 #define atomic_inc(v) atomic_add(1,(v))
 #define atomic_dec(v) atomic_sub(1,(v))
 
+#ifndef CONFIG_GUSA_RB
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 	int ret;
@@ -58,8 +61,6 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return ret;
 }
 
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
 static inline int atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int ret;
@@ -73,6 +74,9 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 
 	return ret != u;
 }
+#endif
+
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 /* Atomic operations are already serializing on SH */
diff --git a/include/asm-sh/bitops-grb.h b/include/asm-sh/bitops-grb.h
new file mode 100644
index 000000000000..a5907b94395b
--- /dev/null
+++ b/include/asm-sh/bitops-grb.h
@@ -0,0 +1,169 @@
+#ifndef __ASM_SH_BITOPS_GRB_H
+#define __ASM_SH_BITOPS_GRB_H
+
+static inline void set_bit(int nr, volatile void * addr)
+{
+	int mask;
+	volatile unsigned int *a = addr;
+	unsigned long tmp;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+
+	__asm__ __volatile__ (
+		"   .align 2              \n\t"
+		"   mova    1f,   r0      \n\t" /* r0 = end point */
+		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
+		"   mov    #-6,  r15      \n\t" /* LOGIN: r15 = size */
+		"   mov.l  @%1,   %0      \n\t" /* load old value */
+		"   or      %2,   %0      \n\t" /* or */
+		"   mov.l   %0,  @%1      \n\t" /* store new value */
+		"1: mov     r1,  r15      \n\t" /* LOGOUT */
+		: "=&r" (tmp),
+		  "+r"  (a)
+		: "r"   (mask)
+		: "memory", "r0", "r1");
+}
+
+static inline void clear_bit(int nr, volatile void * addr)
+{
+	int mask;
+	volatile unsigned int *a = addr;
+	unsigned long tmp;
+
+	a += nr >> 5;
+	mask = ~(1 << (nr & 0x1f));
+	__asm__ __volatile__ (
+		"   .align 2              \n\t"
+		"   mova    1f,   r0      \n\t" /* r0 = end point */
+		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
+		"   mov    #-6,  r15      \n\t" /* LOGIN: r15 = size */
+		"   mov.l  @%1,   %0      \n\t" /* load old value */
+		"   and     %2,   %0      \n\t" /* and */
+		"   mov.l   %0,  @%1      \n\t" /* store new value */
+		"1: mov     r1,  r15      \n\t" /* LOGOUT */
+		: "=&r" (tmp),
+		  "+r"  (a)
+		: "r"   (mask)
+		: "memory", "r0", "r1");
+}
+
+static inline void change_bit(int nr, volatile void * addr)
+{
+	int mask;
+	volatile unsigned int *a = addr;
+	unsigned long tmp;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+	__asm__ __volatile__ (
+		"   .align 2              \n\t"
+		"   mova    1f,   r0      \n\t" /* r0 = end point */
+		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
+		"   mov    #-6,  r15      \n\t" /* LOGIN: r15 = size */
+		"   mov.l  @%1,   %0      \n\t" /* load old value */
+		"   xor     %2,   %0      \n\t" /* xor */
+		"   mov.l   %0,  @%1      \n\t" /* store new value */
+		"1: mov     r1,  r15      \n\t" /* LOGOUT */
+		: "=&r" (tmp),
+		  "+r"  (a)
+		: "r"   (mask)
+		: "memory", "r0", "r1");
+}
+
+static inline int test_and_set_bit(int nr, volatile void * addr)
+{
+	int mask, retval;
+	volatile unsigned int *a = addr;
+	unsigned long tmp;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+
+	__asm__ __volatile__ (
+		"   .align 2              \n\t"
+		"   mova    1f,   r0      \n\t" /* r0 = end point */
+		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
+		"   mov   #-14,  r15      \n\t" /* LOGIN: r15 = size */
+		"   mov.l  @%2,   %0      \n\t" /* load old value */
+		"   mov     %0,   %1      \n\t"
+		"   tst     %1,   %3      \n\t" /* T = ((*a & mask) == 0) */
+		"   mov    #-1,   %1      \n\t" /* retval = -1 */
+		"   negc    %1,   %1      \n\t" /* retval = (mask & *a) != 0 */
+		"   or      %3,   %0      \n\t"
+		"   mov.l   %0,  @%2      \n\t" /* store new value */
+		"1: mov     r1,  r15      \n\t" /* LOGOUT */
+		: "=&r" (tmp),
+		  "=&r" (retval),
+		  "+r"  (a)
+		: "r"   (mask)
+		: "memory", "r0", "r1", "t");
+
+	return retval;
+}
+
+static inline int test_and_clear_bit(int nr, volatile void * addr)
+{
+	int mask, retval, not_mask;
+	volatile unsigned int *a = addr;
+	unsigned long tmp;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+
+	not_mask = ~mask;
+
+	__asm__ __volatile__ (
+		"   .align 2              \n\t"
+		"   mova    1f,   r0      \n\t" /* r0 = end point */
+		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
+		"   mov   #-14,  r15      \n\t" /* LOGIN */
+		"   mov.l  @%2,   %0      \n\t" /* load old value */
+		"   mov     %0,   %1      \n\t" /* %1 = *a */
+		"   tst     %1,   %3      \n\t" /* T = ((*a & mask) == 0) */
+		"   mov    #-1,   %1      \n\t" /* retval = -1 */
+		"   negc    %1,   %1      \n\t" /* retval = (mask & *a) != 0 */
+		"   and     %4,   %0      \n\t"
+		"   mov.l   %0,  @%2      \n\t" /* store new value */
+		"1: mov     r1,  r15      \n\t" /* LOGOUT */
+		: "=&r" (tmp),
+		  "=&r" (retval),
+		  "+r"  (a)
+		: "r"   (mask),
+		  "r"   (not_mask)
+		: "memory", "r0", "r1", "t");
+
+	return retval;
+}
+
+static inline int test_and_change_bit(int nr, volatile void * addr)
+{
+	int mask, retval;
+	volatile unsigned int *a = addr;
+	unsigned long tmp;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+
+	__asm__ __volatile__ (
+		"   .align 2              \n\t"
+		"   mova    1f,   r0      \n\t" /* r0 = end point */
+		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
+		"   mov   #-14,  r15      \n\t" /* LOGIN */
+		"   mov.l  @%2,   %0      \n\t" /* load old value */
+		"   mov     %0,   %1      \n\t" /* %1 = *a */
+		"   tst     %1,   %3      \n\t" /* T = ((*a & mask) == 0) */
+		"   mov    #-1,   %1      \n\t" /* retval = -1 */
+		"   negc    %1,   %1      \n\t" /* retval = (mask & *a) != 0 */
+		"   xor     %3,   %0      \n\t"
+		"   mov.l   %0,  @%2      \n\t" /* store new value */
+		"1: mov     r1,  r15      \n\t" /* LOGOUT */
+		: "=&r" (tmp),
+		  "=&r" (retval),
+		  "+r"  (a)
+		: "r"   (mask)
+		: "memory", "r0", "r1", "t");
+
+	return retval;
+}
+#endif /* __ASM_SH_BITOPS_GRB_H */
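
The GRB bitops keep the same semantics as the IRQ-masking versions in the
next file, so the usual consumers work unchanged; for instance, the classic
try-lock idiom on a flag bit (an illustrative sketch, not part of the patch):

    #include <asm/bitops.h>

    static unsigned long state;             /* bit 0 = resource claimed */

    static int try_claim(void)
    {
            /* old bit was 0 -> we won the claim */
            return !test_and_set_bit(0, &state);
    }

    static void release_claim(void)
    {
            smp_mb__before_clear_bit();     /* order prior stores before release */
            clear_bit(0, &state);
    }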
diff --git a/include/asm-sh/bitops-irq.h b/include/asm-sh/bitops-irq.h
new file mode 100644
index 000000000000..653a12750584
--- /dev/null
+++ b/include/asm-sh/bitops-irq.h
@@ -0,0 +1,91 @@
+#ifndef __ASM_SH_BITOPS_IRQ_H
+#define __ASM_SH_BITOPS_IRQ_H
+
+static inline void set_bit(int nr, volatile void *addr)
+{
+	int mask;
+	volatile unsigned int *a = addr;
+	unsigned long flags;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+	local_irq_save(flags);
+	*a |= mask;
+	local_irq_restore(flags);
+}
+
+static inline void clear_bit(int nr, volatile void *addr)
+{
+	int mask;
+	volatile unsigned int *a = addr;
+	unsigned long flags;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+	local_irq_save(flags);
+	*a &= ~mask;
+	local_irq_restore(flags);
+}
+
+static inline void change_bit(int nr, volatile void *addr)
+{
+	int mask;
+	volatile unsigned int *a = addr;
+	unsigned long flags;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+	local_irq_save(flags);
+	*a ^= mask;
+	local_irq_restore(flags);
+}
+
+static inline int test_and_set_bit(int nr, volatile void *addr)
+{
+	int mask, retval;
+	volatile unsigned int *a = addr;
+	unsigned long flags;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+	local_irq_save(flags);
+	retval = (mask & *a) != 0;
+	*a |= mask;
+	local_irq_restore(flags);
+
+	return retval;
+}
+
+static inline int test_and_clear_bit(int nr, volatile void *addr)
+{
+	int mask, retval;
+	volatile unsigned int *a = addr;
+	unsigned long flags;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+	local_irq_save(flags);
+	retval = (mask & *a) != 0;
+	*a &= ~mask;
+	local_irq_restore(flags);
+
+	return retval;
+}
+
+static inline int test_and_change_bit(int nr, volatile void *addr)
+{
+	int mask, retval;
+	volatile unsigned int *a = addr;
+	unsigned long flags;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+	local_irq_save(flags);
+	retval = (mask & *a) != 0;
+	*a ^= mask;
+	local_irq_restore(flags);
+
+	return retval;
+}
+
+#endif /* __ASM_SH_BITOPS_IRQ_H */
diff --git a/include/asm-sh/bitops.h b/include/asm-sh/bitops.h
index a7bd81a7f064..b6ba5a60dec2 100644
--- a/include/asm-sh/bitops.h
+++ b/include/asm-sh/bitops.h
@@ -11,97 +11,18 @@
 /* For __swab32 */
 #include <asm/byteorder.h>
 
-static inline void set_bit(int nr, volatile void * addr)
-{
-	int mask;
-	volatile unsigned int *a = addr;
-	unsigned long flags;
+#ifdef CONFIG_GUSA_RB
+#include <asm/bitops-grb.h>
+#else
+#include <asm/bitops-irq.h>
+#endif
 
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save(flags);
-	*a |= mask;
-	local_irq_restore(flags);
-}
 
 /*
  * clear_bit() doesn't provide any barrier for the compiler.
  */
 #define smp_mb__before_clear_bit()	barrier()
 #define smp_mb__after_clear_bit()	barrier()
-static inline void clear_bit(int nr, volatile void * addr)
-{
-	int mask;
-	volatile unsigned int *a = addr;
-	unsigned long flags;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save(flags);
-	*a &= ~mask;
-	local_irq_restore(flags);
-}
-
-static inline void change_bit(int nr, volatile void * addr)
-{
-	int mask;
-	volatile unsigned int *a = addr;
-	unsigned long flags;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save(flags);
-	*a ^= mask;
-	local_irq_restore(flags);
-}
-
-static inline int test_and_set_bit(int nr, volatile void * addr)
-{
-	int mask, retval;
-	volatile unsigned int *a = addr;
-	unsigned long flags;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save(flags);
-	retval = (mask & *a) != 0;
-	*a |= mask;
-	local_irq_restore(flags);
-
-	return retval;
-}
-
-static inline int test_and_clear_bit(int nr, volatile void * addr)
-{
-	int mask, retval;
-	volatile unsigned int *a = addr;
-	unsigned long flags;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save(flags);
-	retval = (mask & *a) != 0;
-	*a &= ~mask;
-	local_irq_restore(flags);
-
-	return retval;
-}
-
-static inline int test_and_change_bit(int nr, volatile void * addr)
-{
-	int mask, retval;
-	volatile unsigned int *a = addr;
-	unsigned long flags;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save(flags);
-	retval = (mask & *a) != 0;
-	*a ^= mask;
-	local_irq_restore(flags);
-
-	return retval;
-}
 
 #include <asm-generic/bitops/non-atomic.h>
 
diff --git a/include/asm-sh/cmpxchg-grb.h b/include/asm-sh/cmpxchg-grb.h
new file mode 100644
index 000000000000..e2681abe764f
--- /dev/null
+++ b/include/asm-sh/cmpxchg-grb.h
@@ -0,0 +1,70 @@
+#ifndef __ASM_SH_CMPXCHG_GRB_H
+#define __ASM_SH_CMPXCHG_GRB_H
+
+static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
+{
+	unsigned long retval;
+
+	__asm__ __volatile__ (
+		"   .align 2              \n\t"
+		"   mova    1f,   r0      \n\t" /* r0 = end point */
+		"   nop                   \n\t"
+		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
+		"   mov    #-4,  r15      \n\t" /* LOGIN */
+		"   mov.l  @%1,   %0      \n\t" /* load old value */
+		"   mov.l   %2,  @%1      \n\t" /* store new value */
+		"1: mov     r1,  r15      \n\t" /* LOGOUT */
+		: "=&r" (retval),
+		  "+r"  (m)
+		: "r"   (val)
+		: "memory", "r0", "r1");
+
+	return retval;
+}
+
+static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
+{
+	unsigned long retval;
+
+	__asm__ __volatile__ (
+		"   .align 2              \n\t"
+		"   mova    1f,   r0      \n\t" /* r0 = end point */
+		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
+		"   mov    #-6,  r15      \n\t" /* LOGIN */
+		"   mov.b  @%1,   %0      \n\t" /* load old value */
+		"   extu.b  %0,   %0      \n\t" /* extend as unsigned */
+		"   mov.b   %2,  @%1      \n\t" /* store new value */
+		"1: mov     r1,  r15      \n\t" /* LOGOUT */
+		: "=&r" (retval),
+		  "+r"  (m)
+		: "r"   (val)
+		: "memory", "r0", "r1");
+
+	return retval;
+}
+
+static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
+					  unsigned long new)
+{
+	unsigned long retval;
+
+	__asm__ __volatile__ (
+		"   .align 2              \n\t"
+		"   mova    1f,   r0      \n\t" /* r0 = end point */
+		"   nop                   \n\t"
+		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
+		"   mov    #-8,  r15      \n\t" /* LOGIN */
+		"   mov.l  @%1,   %0      \n\t" /* load old value */
+		"   cmp/eq  %2,   %0      \n\t" /* compare with expected old */
+		"   bf      1f            \n\t" /* if not equal */
+		"   mov.l   %3,  @%1      \n\t" /* store new value */
+		"1: mov     r1,  r15      \n\t" /* LOGOUT */
+		: "=&r" (retval),
+		  "+r"  (m)
+		: "r"   (old), "r" (new)
+		: "memory", "r0", "r1", "t");
+
+	return retval;
+}
+
+#endif /* __ASM_SH_CMPXCHG_GRB_H */
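
__cmpxchg_u32() is normally reached through the cmpxchg() macro that
system.h builds on top of it, and the typical consumer is a read-modify-retry
loop, sketched here with an illustrative helper (not from the patch):

    /* Atomically increment a counter, but never past a cap. */
    static inline unsigned int add_capped(volatile unsigned int *p,
                                          unsigned int cap)
    {
            unsigned int old, new;

            do {
                    old = *p;
                    new = old < cap ? old + 1 : old;
            } while (cmpxchg(p, old, new) != old);

            return new;
    }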
diff --git a/include/asm-sh/cmpxchg-irq.h b/include/asm-sh/cmpxchg-irq.h
new file mode 100644
index 000000000000..43049ec0554b
--- /dev/null
+++ b/include/asm-sh/cmpxchg-irq.h
@@ -0,0 +1,40 @@
+#ifndef __ASM_SH_CMPXCHG_IRQ_H
+#define __ASM_SH_CMPXCHG_IRQ_H
+
+static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
+{
+	unsigned long flags, retval;
+
+	local_irq_save(flags);
+	retval = *m;
+	*m = val;
+	local_irq_restore(flags);
+	return retval;
+}
+
+static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
+{
+	unsigned long flags, retval;
+
+	local_irq_save(flags);
+	retval = *m;
+	*m = val & 0xff;
+	local_irq_restore(flags);
+	return retval;
+}
+
+static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
+					  unsigned long new)
+{
+	__u32 retval;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	retval = *m;
+	if (retval == old)
+		*m = new;
+	local_irq_restore(flags);	/* implies memory barrier */
+	return retval;
+}
+
+#endif /* __ASM_SH_CMPXCHG_IRQ_H */
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
index ad3d2a636130..969f3d4afe2a 100644
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
@@ -68,27 +68,11 @@
 
 #define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
 
-static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
-{
-	unsigned long flags, retval;
-
-	local_irq_save(flags);
-	retval = *m;
-	*m = val;
-	local_irq_restore(flags);
-	return retval;
-}
-
-static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
-{
-	unsigned long flags, retval;
-
-	local_irq_save(flags);
-	retval = *m;
-	*m = val & 0xff;
-	local_irq_restore(flags);
-	return retval;
-}
+#ifdef CONFIG_GUSA_RB
+#include <asm/cmpxchg-grb.h>
+#else
+#include <asm/cmpxchg-irq.h>
+#endif
 
 extern void __xchg_called_with_bad_pointer(void);
 
@@ -115,20 +99,6 @@ extern void __xchg_called_with_bad_pointer(void);
 #define xchg(ptr,x)	\
 	((__typeof__(*(ptr)))__xchg((ptr),(unsigned long)(x), sizeof(*(ptr))))
 
-static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
-	unsigned long new)
-{
-	__u32 retval;
-	unsigned long flags;
-
-	local_irq_save(flags);
-	retval = *m;
-	if (retval == old)
-		*m = new;
-	local_irq_restore(flags);	/* implies memory barrier */
-	return retval;
-}
-
 /* This function doesn't exist, so you'll get a linker error
  * if something tries to do an invalid cmpxchg(). */
 extern void __cmpxchg_called_with_bad_pointer(void);
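
For context, the xchg()/cmpxchg() entry points that remain in system.h
dispatch on operand size before landing in the per-backend helpers above;
roughly like this (an editorial sketch of code outside these hunks, not a
verbatim quote):

    static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
                                       int size)
    {
            switch (size) {
            case 4:
                    return xchg_u32(ptr, x);        /* GRB or IRQ flavour */
            case 1:
                    return xchg_u8(ptr, x);
            }
            __xchg_called_with_bad_pointer();       /* no body -> link error */
            return x;
    }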