path: root/arch/mips/include/asm/irqflags.h
author	Ralf Baechle <ralf@linux-mips.org>	2013-02-08 12:13:30 -0500
committer	Ralf Baechle <ralf@linux-mips.org>	2013-04-11 09:39:51 -0400
commit	02b849f7613003fe5f9e58bf233d49b0ebd4a5e8 (patch)
tree	78db26af28f5da12eddd69ad3c54752e6379118e /arch/mips/include/asm/irqflags.h
parent	0bfbf6a256348b1543e638c7d7b2f3004b289fdb (diff)
MIPS: Get rid of the use of .macro in C code.
It fails with LTO and probably has always been fragile.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch/mips/include/asm/irqflags.h')
-rw-r--r--	arch/mips/include/asm/irqflags.h	153
1 file changed, 76 insertions(+), 77 deletions(-)
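For context, the pattern being removed defines assembler macros from toplevel C asm statements and then invokes them by name from inline asm elsewhere in the file. Under LTO the compiler merges and reorders translation units into a single assembler stream, so a .macro definition can be emitted twice (GNU as rejects the redefinition) or can land after its first use. The replacement expands the body at every use site via __stringify(), so each asm statement is self-contained. Below is a minimal sketch of both styles, using a hypothetical nop_hazard stand-in rather than the kernel's real hazard macros; the two __stringify lines mirror what <linux/stringify.h> provides:

/* Old, fragile: one assembler-level macro definition per translation
 * unit, which every later asm statement quietly depends on. */
__asm__(
	"	.macro	nop_hazard			\n"
	"	nop					\n"
	"	.endm					\n");

static inline void old_style(void)
{
	/* Assembles only if the .macro above reached GNU as first. */
	__asm__ __volatile__("nop_hazard" : : : "memory");
}

/* New: stringify the expansion directly into each asm statement. */
#define __stringify_1(x...)	#x
#define __stringify(x...)	__stringify_1(x)
#define __nop_hazard		nop

static inline void new_style(void)
{
	__asm__ __volatile__(
	"	" __stringify(__nop_hazard) "		\n"
	: /* no outputs */
	: /* no inputs */
	: "memory");
}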
diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
index 9f3384c789d7..45c00951888b 100644
--- a/arch/mips/include/asm/irqflags.h
+++ b/arch/mips/include/asm/irqflags.h
@@ -14,53 +14,48 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/compiler.h>
+#include <linux/stringify.h>
 #include <asm/hazards.h>
 
 #if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
 
-__asm__(
-	"	.macro	arch_local_irq_disable\n"
+static inline void arch_local_irq_disable(void)
+{
+	__asm__ __volatile__(
 	"	.set	push						\n"
 	"	.set	noat						\n"
 	"	di							\n"
-	"	irq_disable_hazard					\n"
+	"	" __stringify(__irq_disable_hazard) "			\n"
 	"	.set	pop						\n"
-	"	.endm							\n");
-
-static inline void arch_local_irq_disable(void)
-{
-	__asm__ __volatile__(
-	"arch_local_irq_disable"
-	: /* no outputs */
-	: /* no inputs */
-	: "memory");
+	: /* no outputs */
+	: /* no inputs */
+	: "memory");
 }
 
+static inline unsigned long arch_local_irq_save(void)
+{
+	unsigned long flags;
 
-__asm__(
-	"	.macro	arch_local_irq_save result			\n"
+	asm __volatile__(
 	"	.set	push						\n"
 	"	.set	reorder						\n"
 	"	.set	noat						\n"
-	"	di	\\result					\n"
-	"	andi	\\result, 1					\n"
-	"	irq_disable_hazard					\n"
+	"	di	%[flags]					\n"
+	"	andi	%[flags], 1					\n"
+	"	" __stringify(__irq_disable_hazard) "			\n"
 	"	.set	pop						\n"
-	"	.endm							\n");
+	: [flags] "=r" (flags)
+	: /* no inputs */
+	: "memory");
 
-static inline unsigned long arch_local_irq_save(void)
-{
-	unsigned long flags;
-	asm volatile("arch_local_irq_save\t%0"
-		     : "=r" (flags)
-		     : /* no inputs */
-		     : "memory");
 	return flags;
 }
 
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+	unsigned long __tmp1;
 
-__asm__(
-	"	.macro	arch_local_irq_restore flags			\n"
+	__asm__ __volatile__(
 	"	.set	push						\n"
 	"	.set	noreorder					\n"
 	"	.set	noat						\n"
@@ -69,7 +64,7 @@ __asm__(
 	 * Slow, but doesn't suffer from a relatively unlikely race
 	 * condition we're having since days 1.
 	 */
-	"	beqz	\\flags, 1f					\n"
+	"	beqz	%[flags], 1f					\n"
 	"	di							\n"
 	"	ei							\n"
 	"1:								\n"
@@ -78,33 +73,44 @@ __asm__(
 	 * Fast, dangerous. Life is fun, life is good.
 	 */
 	"	mfc0	$1, $12						\n"
-	"	ins	$1, \\flags, 0, 1				\n"
+	"	ins	$1, %[flags], 0, 1				\n"
 	"	mtc0	$1, $12						\n"
 #endif
-	"	irq_disable_hazard					\n"
+	"	" __stringify(__irq_disable_hazard) "			\n"
 	"	.set	pop						\n"
-	"	.endm							\n");
-
-static inline void arch_local_irq_restore(unsigned long flags)
-{
-	unsigned long __tmp1;
-
-	__asm__ __volatile__(
-	"arch_local_irq_restore\t%0"
-	: "=r" (__tmp1)
-	: "0" (flags)
-	: "memory");
+	: [flags] "=r" (__tmp1)
+	: "0" (flags)
+	: "memory");
 }
 
 static inline void __arch_local_irq_restore(unsigned long flags)
 {
-	unsigned long __tmp1;
-
 	__asm__ __volatile__(
-	"arch_local_irq_restore\t%0"
-	: "=r" (__tmp1)
-	: "0" (flags)
-	: "memory");
+	"	.set	push						\n"
+	"	.set	noreorder					\n"
+	"	.set	noat						\n"
+#if defined(CONFIG_IRQ_CPU)
+	/*
+	 * Slow, but doesn't suffer from a relatively unlikely race
+	 * condition we're having since days 1.
+	 */
+	"	beqz	%[flags], 1f					\n"
+	"	di							\n"
+	"	ei							\n"
+	"1:								\n"
+#else
+	/*
+	 * Fast, dangerous. Life is fun, life is good.
+	 */
+	"	mfc0	$1, $12						\n"
+	"	ins	$1, %[flags], 0, 1				\n"
+	"	mtc0	$1, $12						\n"
+#endif
+	"	" __stringify(__irq_disable_hazard) "			\n"
+	"	.set	pop						\n"
+	: [flags] "=r" (flags)
+	: "0" (flags)
+	: "memory");
 }
 #else
 /* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
@@ -115,8 +121,18 @@ void __arch_local_irq_restore(unsigned long flags);
 #endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
 
 
-__asm__(
-	"	.macro	arch_local_irq_enable			\n"
+extern void smtc_ipi_replay(void);
+
+static inline void arch_local_irq_enable(void)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC kernel needs to do a software replay of queued
+	 * IPIs, at the cost of call overhead on each local_irq_enable()
+	 */
+	smtc_ipi_replay();
+#endif
+	__asm__ __volatile__(
 	"	.set	push						\n"
 	"	.set	reorder						\n"
 	"	.set	noat						\n"
@@ -133,45 +149,28 @@ __asm__(
133 " xori $1,0x1e \n" 149 " xori $1,0x1e \n"
134 " mtc0 $1,$12 \n" 150 " mtc0 $1,$12 \n"
135#endif 151#endif
136 " irq_enable_hazard \n" 152 " " __stringify(__irq_enable_hazard) " \n"
137 " .set pop \n" 153 " .set pop \n"
138 " .endm"); 154 : /* no outputs */
139 155 : /* no inputs */
140extern void smtc_ipi_replay(void); 156 : "memory");
141
142static inline void arch_local_irq_enable(void)
143{
144#ifdef CONFIG_MIPS_MT_SMTC
145 /*
146 * SMTC kernel needs to do a software replay of queued
147 * IPIs, at the cost of call overhead on each local_irq_enable()
148 */
149 smtc_ipi_replay();
150#endif
151 __asm__ __volatile__(
152 "arch_local_irq_enable"
153 : /* no outputs */
154 : /* no inputs */
155 : "memory");
156} 157}
157 158
159static inline unsigned long arch_local_save_flags(void)
160{
161 unsigned long flags;
158 162
159__asm__( 163 asm __volatile__(
160 " .macro arch_local_save_flags flags \n"
161 " .set push \n" 164 " .set push \n"
162 " .set reorder \n" 165 " .set reorder \n"
163#ifdef CONFIG_MIPS_MT_SMTC 166#ifdef CONFIG_MIPS_MT_SMTC
164 " mfc0 \\flags, $2, 1 \n" 167 " mfc0 %[flags], $2, 1 \n"
165#else 168#else
166 " mfc0 \\flags, $12 \n" 169 " mfc0 %[flags], $12 \n"
167#endif 170#endif
168 " .set pop \n" 171 " .set pop \n"
169 " .endm \n"); 172 : [flags] "=r" (flags));
170 173
171static inline unsigned long arch_local_save_flags(void)
172{
173 unsigned long flags;
174 asm volatile("arch_local_save_flags %0" : "=r" (flags));
175 return flags; 174 return flags;
176} 175}
177 176