author		Ralf Baechle <ralf@linux-mips.org>	2013-02-08 12:13:30 -0500
committer	Ralf Baechle <ralf@linux-mips.org>	2013-04-11 09:39:51 -0400
commit		02b849f7613003fe5f9e58bf233d49b0ebd4a5e8 (patch)
tree		78db26af28f5da12eddd69ad3c54752e6379118e /arch/mips/include
parent		0bfbf6a256348b1543e638c7d7b2f3004b289fdb (diff)
MIPS: Get rid of the use of .macro in C code.
It fails with LTO and probably has always been fragile.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
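Background on the change: the old ASMMACRO() helper (removed below) defined a gas-level .macro from C through a top-level __asm__() statement and relied on the assembler seeing that definition before every use. Under LTO the compiler may reorder, duplicate, or split top-level asm statements across compilation units, so the .macro definition can land after its uses. The replacement keeps the instruction sequence in a C preprocessor macro and pastes it into each asm statement with __stringify(), so no assembler-side state has to survive between statements. A minimal sketch of the two patterns, using a hypothetical barrier name my_ehb rather than the kernel's real ones:

/*
 * Old, fragile pattern: a gas .macro defined from C.  The assembler
 * must process this .macro definition before any use of "my_ehb",
 * an ordering that LTO does not guarantee.
 */
__asm__(".macro my_ehb; sll $0, $0, 3; .endm");

static inline void my_ehb_old(void)
{
	__asm__ __volatile__("my_ehb");
}

/*
 * New pattern: the sequence lives in a cpp macro and is stringified
 * into each asm statement, so every use is self-contained.
 */
#include <linux/stringify.h>

#define ___my_ehb \
	sll	$0, $0, 3

static inline void my_ehb_new(void)
{
	__asm__ __volatile__(__stringify(___my_ehb));
}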
Diffstat (limited to 'arch/mips/include')
-rw-r--r--	arch/mips/include/asm/hazards.h		371
-rw-r--r--	arch/mips/include/asm/irqflags.h	153
2 files changed, 319 insertions, 205 deletions
diff --git a/arch/mips/include/asm/hazards.h b/arch/mips/include/asm/hazards.h
index 44d6a5bde4a1..e3ee92d4dbe7 100644
--- a/arch/mips/include/asm/hazards.h
+++ b/arch/mips/include/asm/hazards.h
@@ -10,34 +10,13 @@
 #ifndef _ASM_HAZARDS_H
 #define _ASM_HAZARDS_H
 
-#ifdef __ASSEMBLY__
-#define ASMMACRO(name, code...) .macro name; code; .endm
-#else
-
-#include <asm/cpu-features.h>
-
-#define ASMMACRO(name, code...) \
-__asm__(".macro " #name "; " #code "; .endm"); \
-\
-static inline void name(void) \
-{ \
-	__asm__ __volatile__ (#name); \
-}
-
-/*
- * MIPS R2 instruction hazard barrier. Needs to be called as a subroutine.
- */
-extern void mips_ihb(void);
-
-#endif
+#include <linux/stringify.h>
 
-ASMMACRO(_ssnop,
-	 sll	$0, $0, 1
-	)
+#define ___ssnop \
+	sll	$0, $0, 1
 
-ASMMACRO(_ehb,
-	 sll	$0, $0, 3
-	)
+#define ___ehb \
+	sll	$0, $0, 3
 
 /*
  * TLB hazards
@@ -48,24 +27,24 @@ ASMMACRO(_ehb,
  * MIPSR2 defines ehb for hazard avoidance
  */
 
-ASMMACRO(mtc0_tlbw_hazard,
-	 _ehb
-	)
-ASMMACRO(tlbw_use_hazard,
-	 _ehb
-	)
-ASMMACRO(tlb_probe_hazard,
-	 _ehb
-	)
-ASMMACRO(irq_enable_hazard,
-	 _ehb
-	)
-ASMMACRO(irq_disable_hazard,
-	 _ehb
-	)
-ASMMACRO(back_to_back_c0_hazard,
-	 _ehb
-	)
+#define __mtc0_tlbw_hazard \
+	___ehb
+
+#define __tlbw_use_hazard \
+	___ehb
+
+#define __tlb_probe_hazard \
+	___ehb
+
+#define __irq_enable_hazard \
+	___ehb
+
+#define __irq_disable_hazard \
+	___ehb
+
+#define __back_to_back_c0_hazard \
+	___ehb
+
 /*
  * gcc has a tradition of misscompiling the previous construct using the
  * address of a label as argument to inline assembler. Gas otoh has the
@@ -94,24 +73,42 @@ do { \
  * These are slightly complicated by the fact that we guarantee R1 kernels to
  * run fine on R2 processors.
  */
-ASMMACRO(mtc0_tlbw_hazard,
-	 _ssnop; _ssnop; _ehb
-	)
-ASMMACRO(tlbw_use_hazard,
-	 _ssnop; _ssnop; _ssnop; _ehb
-	)
-ASMMACRO(tlb_probe_hazard,
-	 _ssnop; _ssnop; _ssnop; _ehb
-	)
-ASMMACRO(irq_enable_hazard,
-	 _ssnop; _ssnop; _ssnop; _ehb
-	)
-ASMMACRO(irq_disable_hazard,
-	 _ssnop; _ssnop; _ssnop; _ehb
-	)
-ASMMACRO(back_to_back_c0_hazard,
-	 _ssnop; _ssnop; _ssnop; _ehb
-	)
+
+#define __mtc0_tlbw_hazard \
+	___ssnop; \
+	___ssnop; \
+	___ehb
+
+#define __tlbw_use_hazard \
+	___ssnop; \
+	___ssnop; \
+	___ssnop; \
+	___ehb
+
+#define __tlb_probe_hazard \
+	___ssnop; \
+	___ssnop; \
+	___ssnop; \
+	___ehb
+
+#define __irq_enable_hazard \
+	___ssnop; \
+	___ssnop; \
+	___ssnop; \
+	___ehb
+
+#define __irq_disable_hazard \
+	___ssnop; \
+	___ssnop; \
+	___ssnop; \
+	___ehb
+
+#define __back_to_back_c0_hazard \
+	___ssnop; \
+	___ssnop; \
+	___ssnop; \
+	___ehb
+
 /*
  * gcc has a tradition of misscompiling the previous construct using the
  * address of a label as argument to inline assembler. Gas otoh has the
@@ -147,18 +144,18 @@ do { \
  * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
  */
 
-ASMMACRO(mtc0_tlbw_hazard,
-	)
-ASMMACRO(tlbw_use_hazard,
-	)
-ASMMACRO(tlb_probe_hazard,
-	)
-ASMMACRO(irq_enable_hazard,
-	)
-ASMMACRO(irq_disable_hazard,
-	)
-ASMMACRO(back_to_back_c0_hazard,
-	)
+#define __mtc0_tlbw_hazard
+
+#define __tlbw_use_hazard
+
+#define __tlb_probe_hazard
+
+#define __irq_enable_hazard
+
+#define __irq_disable_hazard
+
+#define __back_to_back_c0_hazard
+
 #define instruction_hazard() do { } while (0)
 
 #elif defined(CONFIG_CPU_SB1)
@@ -166,19 +163,21 @@ ASMMACRO(back_to_back_c0_hazard,
 /*
  * Mostly like R4000 for historic reasons
  */
-ASMMACRO(mtc0_tlbw_hazard,
-	)
-ASMMACRO(tlbw_use_hazard,
-	)
-ASMMACRO(tlb_probe_hazard,
-	)
-ASMMACRO(irq_enable_hazard,
-	)
-ASMMACRO(irq_disable_hazard,
-	 _ssnop; _ssnop; _ssnop
-	)
-ASMMACRO(back_to_back_c0_hazard,
-	)
+#define __mtc0_tlbw_hazard
+
+#define __tlbw_use_hazard
+
+#define __tlb_probe_hazard
+
+#define __irq_enable_hazard
+
+#define __irq_disable_hazard \
+	___ssnop; \
+	___ssnop; \
+	___ssnop
+
+#define __back_to_back_c0_hazard
+
 #define instruction_hazard() do { } while (0)
 
 #else
@@ -192,24 +191,35 @@ ASMMACRO(back_to_back_c0_hazard,
  * hazard so this is nice trick to have an optimal code for a range of
  * processors.
  */
-ASMMACRO(mtc0_tlbw_hazard,
-	 nop; nop
-	)
-ASMMACRO(tlbw_use_hazard,
-	 nop; nop; nop
-	)
-ASMMACRO(tlb_probe_hazard,
-	 nop; nop; nop
-	)
-ASMMACRO(irq_enable_hazard,
-	 _ssnop; _ssnop; _ssnop;
-	)
-ASMMACRO(irq_disable_hazard,
-	 nop; nop; nop
-	)
-ASMMACRO(back_to_back_c0_hazard,
-	 _ssnop; _ssnop; _ssnop;
-	)
+#define __mtc0_tlbw_hazard \
+	nop; \
+	nop
+
+#define __tlbw_use_hazard \
+	nop; \
+	nop; \
+	nop
+
+#define __tlb_probe_hazard \
+	nop; \
+	nop; \
+	nop
+
+#define __irq_enable_hazard \
+	___ssnop; \
+	___ssnop; \
+	___ssnop
+
+#define __irq_disable_hazard \
+	nop; \
+	nop; \
+	nop
+
+#define __back_to_back_c0_hazard \
+	___ssnop; \
+	___ssnop; \
+	___ssnop
+
 #define instruction_hazard() do { } while (0)
 
 #endif
@@ -218,32 +228,137 @@ ASMMACRO(back_to_back_c0_hazard,
 /* FPU hazards */
 
 #if defined(CONFIG_CPU_SB1)
-ASMMACRO(enable_fpu_hazard,
-	 .set push;
-	 .set mips64;
-	 .set noreorder;
-	 _ssnop;
-	 bnezl $0, .+4;
-	 _ssnop;
-	 .set pop
-)
-ASMMACRO(disable_fpu_hazard,
-)
+
+#define __enable_fpu_hazard \
+	.set	push; \
+	.set	mips64; \
+	.set	noreorder; \
+	___ssnop; \
+	bnezl	$0, .+4; \
+	___ssnop; \
+	.set	pop
+
+#define __disable_fpu_hazard
 
 #elif defined(CONFIG_CPU_MIPSR2)
-ASMMACRO(enable_fpu_hazard,
-	 _ehb
-)
-ASMMACRO(disable_fpu_hazard,
-	 _ehb
-)
+
+#define __enable_fpu_hazard \
+	___ehb
+
+#define __disable_fpu_hazard \
+	___ehb
+
 #else
-ASMMACRO(enable_fpu_hazard,
-	 nop; nop; nop; nop
-)
-ASMMACRO(disable_fpu_hazard,
-	 _ehb
-)
+
+#define __enable_fpu_hazard \
+	nop; \
+	nop; \
+	nop; \
+	nop
+
+#define __disable_fpu_hazard \
+	___ehb
+
 #endif
 
+#ifdef __ASSEMBLY__
+
+#define _ssnop ___ssnop
+#define _ehb ___ehb
+#define mtc0_tlbw_hazard __mtc0_tlbw_hazard
+#define tlbw_use_hazard __tlbw_use_hazard
+#define tlb_probe_hazard __tlb_probe_hazard
+#define irq_enable_hazard __irq_enable_hazard
+#define irq_disable_hazard __irq_disable_hazard
+#define back_to_back_c0_hazard __back_to_back_c0_hazard
+#define enable_fpu_hazard __enable_fpu_hazard
+#define disable_fpu_hazard __disable_fpu_hazard
+
+#else
+
+#define _ssnop() \
+do { \
+	__asm__ __volatile__( \
+	__stringify(___ssnop) \
+	); \
+} while (0)
+
+#define _ehb() \
+do { \
+	__asm__ __volatile__( \
+	__stringify(___ehb) \
+	); \
+} while (0)
+
+
+#define mtc0_tlbw_hazard() \
+do { \
+	__asm__ __volatile__( \
+	__stringify(__mtc0_tlbw_hazard) \
+	); \
+} while (0)
+
+
+#define tlbw_use_hazard() \
+do { \
+	__asm__ __volatile__( \
+	__stringify(__tlbw_use_hazard) \
+	); \
+} while (0)
+
+
+#define tlb_probe_hazard() \
+do { \
+	__asm__ __volatile__( \
+	__stringify(__tlb_probe_hazard) \
+	); \
+} while (0)
+
+
+#define irq_enable_hazard() \
+do { \
+	__asm__ __volatile__( \
+	__stringify(__irq_enable_hazard) \
+	); \
+} while (0)
+
+
+#define irq_disable_hazard() \
+do { \
+	__asm__ __volatile__( \
+	__stringify(__irq_disable_hazard) \
+	); \
+} while (0)
+
+
+#define back_to_back_c0_hazard() \
+do { \
+	__asm__ __volatile__( \
+	__stringify(__back_to_back_c0_hazard) \
+	); \
+} while (0)
+
+
+#define enable_fpu_hazard() \
+do { \
+	__asm__ __volatile__( \
+	__stringify(__enable_fpu_hazard) \
+	); \
+} while (0)
+
+
+#define disable_fpu_hazard() \
+do { \
+	__asm__ __volatile__( \
+	__stringify(__disable_fpu_hazard) \
+	); \
+} while (0)
+
+/*
+ * MIPS R2 instruction hazard barrier. Needs to be called as a subroutine.
+ */
+extern void mips_ihb(void);
+
+#endif /* __ASSEMBLY__ */
+
 #endif /* _ASM_HAZARDS_H */
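The shape hazards.h lands on is worth noting: each hazard is a bare instruction sequence in a __-prefixed cpp macro, aliased directly for .S files under #ifdef __ASSEMBLY__ and wrapped for C in a do { ... } while (0) statement that stringifies the same sequence into inline asm. A trimmed sketch of this dual-use idiom with a hypothetical barrier __my_hazard (not one of the kernel's names):

#define __my_hazard \
	sll	$0, $0, 3

#ifdef __ASSEMBLY__

/* Assembly files use the bare sequence under the public name. */
#define my_hazard __my_hazard

#else

#include <linux/stringify.h>

/* C callers get a statement-like wrapper that pastes the same
 * sequence into an inline asm block. */
#define my_hazard() \
do { \
	__asm__ __volatile__( \
	__stringify(__my_hazard) \
	); \
} while (0)

#endif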
diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
index 9f3384c789d7..45c00951888b 100644
--- a/arch/mips/include/asm/irqflags.h
+++ b/arch/mips/include/asm/irqflags.h
@@ -14,53 +14,48 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/compiler.h>
+#include <linux/stringify.h>
 #include <asm/hazards.h>
 
 #if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
 
-__asm__(
-	"	.macro	arch_local_irq_disable\n"
+static inline void arch_local_irq_disable(void)
+{
+	__asm__ __volatile__(
 	"	.set	push						\n"
 	"	.set	noat						\n"
 	"	di							\n"
-	"	irq_disable_hazard					\n"
+	"	" __stringify(__irq_disable_hazard) "			\n"
 	"	.set	pop						\n"
-	"	.endm						\n");
-
-static inline void arch_local_irq_disable(void)
-{
-	__asm__ __volatile__(
-		"arch_local_irq_disable"
-		: /* no outputs */
-		: /* no inputs */
-		: "memory");
+	: /* no outputs */
+	: /* no inputs */
+	: "memory");
 }
 
+static inline unsigned long arch_local_irq_save(void)
+{
+	unsigned long flags;
 
-__asm__(
-	"	.macro	arch_local_irq_save result	\n"
+	asm __volatile__(
 	"	.set	push						\n"
 	"	.set	reorder						\n"
 	"	.set	noat						\n"
-	"	di	\\result					\n"
-	"	andi	\\result, 1					\n"
-	"	irq_disable_hazard					\n"
+	"	di	%[flags]					\n"
+	"	andi	%[flags], 1					\n"
+	"	" __stringify(__irq_disable_hazard) "			\n"
 	"	.set	pop						\n"
-	"	.endm						\n");
+	: [flags] "=r" (flags)
+	: /* no inputs */
+	: "memory");
 
-static inline unsigned long arch_local_irq_save(void)
-{
-	unsigned long flags;
-	asm volatile("arch_local_irq_save\t%0"
-		     : "=r" (flags)
-		     : /* no inputs */
-		     : "memory");
 	return flags;
 }
 
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+	unsigned long __tmp1;
 
-__asm__(
-	"	.macro	arch_local_irq_restore flags	\n"
+	__asm__ __volatile__(
 	"	.set	push						\n"
 	"	.set	noreorder					\n"
 	"	.set	noat						\n"
@@ -69,7 +64,7 @@ __asm__(
 	/*
 	 * Slow, but doesn't suffer from a relatively unlikely race
 	 * condition we're having since days 1.
 	 */
-	"	beqz	\\flags, 1f					\n"
+	"	beqz	%[flags], 1f					\n"
 	"	di							\n"
 	"	ei							\n"
 	"1:								\n"
@@ -78,33 +73,44 @@ __asm__(
 	/*
 	 * Fast, dangerous. Life is fun, life is good.
 	 */
 	"	mfc0	$1, $12						\n"
-	"	ins	$1, \\flags, 0, 1				\n"
+	"	ins	$1, %[flags], 0, 1				\n"
 	"	mtc0	$1, $12						\n"
 #endif
-	"	irq_disable_hazard					\n"
+	"	" __stringify(__irq_disable_hazard) "			\n"
 	"	.set	pop						\n"
-	"	.endm						\n");
-
-static inline void arch_local_irq_restore(unsigned long flags)
-{
-	unsigned long __tmp1;
-
-	__asm__ __volatile__(
-		"arch_local_irq_restore\t%0"
-		: "=r" (__tmp1)
-		: "0" (flags)
-		: "memory");
+	: [flags] "=r" (__tmp1)
+	: "0" (flags)
+	: "memory");
 }
 
 static inline void __arch_local_irq_restore(unsigned long flags)
 {
-	unsigned long __tmp1;
-
 	__asm__ __volatile__(
-		"arch_local_irq_restore\t%0"
-		: "=r" (__tmp1)
-		: "0" (flags)
-		: "memory");
+	"	.set	push						\n"
+	"	.set	noreorder					\n"
+	"	.set	noat						\n"
+#if defined(CONFIG_IRQ_CPU)
+	/*
+	 * Slow, but doesn't suffer from a relatively unlikely race
+	 * condition we're having since days 1.
+	 */
+	"	beqz	%[flags], 1f					\n"
+	"	di							\n"
+	"	ei							\n"
+	"1:								\n"
+#else
+	/*
+	 * Fast, dangerous. Life is fun, life is good.
+	 */
+	"	mfc0	$1, $12						\n"
+	"	ins	$1, %[flags], 0, 1				\n"
+	"	mtc0	$1, $12						\n"
+#endif
+	"	" __stringify(__irq_disable_hazard) "			\n"
+	"	.set	pop						\n"
+	: [flags] "=r" (flags)
+	: "0" (flags)
+	: "memory");
 }
 #else
 /* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
@@ -115,8 +121,18 @@ void __arch_local_irq_restore(unsigned long flags);
 #endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
 
 
-__asm__(
-	"	.macro	arch_local_irq_enable		\n"
+extern void smtc_ipi_replay(void);
+
+static inline void arch_local_irq_enable(void)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC kernel needs to do a software replay of queued
+	 * IPIs, at the cost of call overhead on each local_irq_enable()
+	 */
+	smtc_ipi_replay();
+#endif
+	__asm__ __volatile__(
 	"	.set	push				\n"
 	"	.set	reorder				\n"
 	"	.set	noat				\n"
@@ -133,45 +149,28 @@ __asm__(
 	"	xori	$1,0x1e				\n"
 	"	mtc0	$1,$12				\n"
 #endif
-	"	irq_enable_hazard			\n"
+	"	" __stringify(__irq_enable_hazard) "	\n"
 	"	.set	pop				\n"
-	"	.endm");
-
-extern void smtc_ipi_replay(void);
-
-static inline void arch_local_irq_enable(void)
-{
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * SMTC kernel needs to do a software replay of queued
-	 * IPIs, at the cost of call overhead on each local_irq_enable()
-	 */
-	smtc_ipi_replay();
-#endif
-	__asm__ __volatile__(
-		"arch_local_irq_enable"
-		: /* no outputs */
-		: /* no inputs */
-		: "memory");
+	: /* no outputs */
+	: /* no inputs */
+	: "memory");
 }
 
+static inline unsigned long arch_local_save_flags(void)
+{
+	unsigned long flags;
 
-__asm__(
-	"	.macro	arch_local_save_flags flags	\n"
+	asm __volatile__(
 	"	.set	push				\n"
 	"	.set	reorder				\n"
 #ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	\\flags, $2, 1			\n"
+	"	mfc0	%[flags], $2, 1			\n"
 #else
-	"	mfc0	\\flags, $12			\n"
+	"	mfc0	%[flags], $12			\n"
 #endif
 	"	.set	pop				\n"
-	"	.endm					\n");
-
-static inline unsigned long arch_local_save_flags(void)
-{
-	unsigned long flags;
-	asm volatile("arch_local_save_flags %0" : "=r" (flags));
+	: [flags] "=r" (flags));
+
 	return flags;
 }
 
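A related detail of the irqflags.h conversion: gas macro arguments (\flags, \result) become named operands in extended asm, written [flags] in the constraint list and %[flags] in the template, which bind a C variable directly and need no assembler-side state. A self-contained sketch of that binding, reduced from arch_local_irq_save() above (hazard barrier omitted; the function name sketch_irq_save is hypothetical, and a MIPS32R2 target is assumed for the di instruction):

static inline unsigned long sketch_irq_save(void)
{
	unsigned long flags;

	__asm__ __volatile__(
	"	.set	push				\n"
	"	.set	reorder				\n"
	"	.set	noat				\n"
	/* di copies Status to %[flags], then clears Status.IE;
	 * andi keeps only the interrupt-enable bit. */
	"	di	%[flags]			\n"
	"	andi	%[flags], 1			\n"
	"	.set	pop				\n"
	: [flags] "=r" (flags)
	: /* no inputs */
	: "memory");

	return flags;
}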