Diffstat (limited to 'arch/mips/include/asm/irqflags.h')

-rw-r--r--	arch/mips/include/asm/irqflags.h	207
1 file changed, 75 insertions(+), 132 deletions(-)
diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
index 309cbcd6909c..9f3384c789d7 100644
--- a/arch/mips/include/asm/irqflags.h
+++ b/arch/mips/include/asm/irqflags.h
@@ -16,83 +16,13 @@
 #include <linux/compiler.h>
 #include <asm/hazards.h>
 
-__asm__(
-	"	.macro	arch_local_irq_enable			\n"
-	"	.set	push					\n"
-	"	.set	reorder					\n"
-	"	.set	noat					\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	$1, $2, 1	# SMTC - clear TCStatus.IXMT	\n"
-	"	ori	$1, 0x400				\n"
-	"	xori	$1, 0x400				\n"
-	"	mtc0	$1, $2, 1				\n"
-#elif defined(CONFIG_CPU_MIPSR2)
-	"	ei						\n"
-#else
-	"	mfc0	$1,$12					\n"
-	"	ori	$1,0x1f					\n"
-	"	xori	$1,0x1e					\n"
-	"	mtc0	$1,$12					\n"
-#endif
-	"	irq_enable_hazard				\n"
-	"	.set	pop					\n"
-	"	.endm");
+#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
 
-extern void smtc_ipi_replay(void);
-
-static inline void arch_local_irq_enable(void)
-{
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * SMTC kernel needs to do a software replay of queued
-	 * IPIs, at the cost of call overhead on each local_irq_enable()
-	 */
-	smtc_ipi_replay();
-#endif
-	__asm__ __volatile__(
-		"arch_local_irq_enable"
-		: /* no outputs */
-		: /* no inputs */
-		: "memory");
-}
-
-
-/*
- * For cli() we have to insert nops to make sure that the new value
- * has actually arrived in the status register before the end of this
- * macro.
- * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
- * no nops at all.
- */
-/*
- * For TX49, operating only IE bit is not enough.
- *
- * If mfc0 $12 follows store and the mfc0 is last instruction of a
- * page and fetching the next instruction causes TLB miss, the result
- * of the mfc0 might wrongly contain EXL bit.
- *
- * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
- *
- * Workaround: mask EXL bit of the result or place a nop before mfc0.
- */
 __asm__(
 	"	.macro	arch_local_irq_disable			\n"
 	"	.set	push					\n"
 	"	.set	noat					\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	$1, $2, 1				\n"
-	"	ori	$1, 0x400				\n"
-	"	.set	noreorder				\n"
-	"	mtc0	$1, $2, 1				\n"
-#elif defined(CONFIG_CPU_MIPSR2)
 	"	di						\n"
-#else
-	"	mfc0	$1,$12					\n"
-	"	ori	$1,0x1f					\n"
-	"	xori	$1,0x1f					\n"
-	"	.set	noreorder				\n"
-	"	mtc0	$1,$12					\n"
-#endif
 	"	irq_disable_hazard				\n"
 	"	.set	pop					\n"
 	"	.endm						\n");
@@ -106,46 +36,14 @@ static inline void arch_local_irq_disable(void)
 		: "memory");
 }
 
-__asm__(
-	"	.macro	arch_local_save_flags flags		\n"
-	"	.set	push					\n"
-	"	.set	reorder					\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	\\flags, $2, 1				\n"
-#else
-	"	mfc0	\\flags, $12				\n"
-#endif
-	"	.set	pop					\n"
-	"	.endm						\n");
-
-static inline unsigned long arch_local_save_flags(void)
-{
-	unsigned long flags;
-	asm volatile("arch_local_save_flags %0" : "=r" (flags));
-	return flags;
-}
 
 __asm__(
 	"	.macro	arch_local_irq_save result		\n"
 	"	.set	push					\n"
 	"	.set	reorder					\n"
 	"	.set	noat					\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	\\result, $2, 1				\n"
-	"	ori	$1, \\result, 0x400			\n"
-	"	.set	noreorder				\n"
-	"	mtc0	$1, $2, 1				\n"
-	"	andi	\\result, \\result, 0x400		\n"
-#elif defined(CONFIG_CPU_MIPSR2)
 	"	di	\\result				\n"
 	"	andi	\\result, 1				\n"
-#else
-	"	mfc0	\\result, $12				\n"
-	"	ori	$1, \\result, 0x1f			\n"
-	"	xori	$1, 0x1f				\n"
-	"	.set	noreorder				\n"
-	"	mtc0	$1, $12					\n"
-#endif
 	"	irq_disable_hazard				\n"
 	"	.set	pop					\n"
 	"	.endm						\n");
@@ -160,61 +58,37 @@ static inline unsigned long arch_local_irq_save(void)
 	return flags;
 }
 
+
 __asm__(
 	"	.macro	arch_local_irq_restore flags		\n"
 	"	.set	push					\n"
 	"	.set	noreorder				\n"
 	"	.set	noat					\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	$1, $2, 1				\n"
-	"	andi	\\flags, 0x400				\n"
-	"	ori	$1, 0x400				\n"
-	"	xori	$1, 0x400				\n"
-	"	or	\\flags, $1				\n"
-	"	mtc0	\\flags, $2, 1				\n"
-#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+#if defined(CONFIG_IRQ_CPU)
 	/*
 	 * Slow, but doesn't suffer from a relatively unlikely race
 	 * condition we're having since days 1.
 	 */
 	"	beqz	\\flags, 1f				\n"
 	"	di						\n"
 	"	ei						\n"
 	"1:							\n"
-#elif defined(CONFIG_CPU_MIPSR2)
+#else
 	/*
 	 * Fast, dangerous. Life is fun, life is good.
 	 */
 	"	mfc0	$1, $12					\n"
 	"	ins	$1, \\flags, 0, 1			\n"
 	"	mtc0	$1, $12					\n"
-#else
-	"	mfc0	$1, $12					\n"
-	"	andi	\\flags, 1				\n"
-	"	ori	$1, 0x1f				\n"
-	"	xori	$1, 0x1f				\n"
-	"	or	\\flags, $1				\n"
-	"	mtc0	\\flags, $12				\n"
 #endif
 	"	irq_disable_hazard				\n"
 	"	.set	pop					\n"
 	"	.endm						\n");
 
-
 static inline void arch_local_irq_restore(unsigned long flags)
 {
 	unsigned long __tmp1;
 
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * SMTC kernel needs to do a software replay of queued
-	 * IPIs, at the cost of branch and call overhead on each
-	 * local_irq_restore()
-	 */
-	if (unlikely(!(flags & 0x0400)))
-		smtc_ipi_replay();
-#endif
-
 	__asm__ __volatile__(
 		"arch_local_irq_restore\t%0"
 		: "=r" (__tmp1)
@@ -232,6 +106,75 @@ static inline void __arch_local_irq_restore(unsigned long flags)
232 : "0" (flags) 106 : "0" (flags)
233 : "memory"); 107 : "memory");
234} 108}
109#else
110/* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
111void arch_local_irq_disable(void);
112unsigned long arch_local_irq_save(void);
113void arch_local_irq_restore(unsigned long flags);
114void __arch_local_irq_restore(unsigned long flags);
115#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
116
117
118__asm__(
119 " .macro arch_local_irq_enable \n"
120 " .set push \n"
121 " .set reorder \n"
122 " .set noat \n"
123#ifdef CONFIG_MIPS_MT_SMTC
124 " mfc0 $1, $2, 1 # SMTC - clear TCStatus.IXMT \n"
125 " ori $1, 0x400 \n"
126 " xori $1, 0x400 \n"
127 " mtc0 $1, $2, 1 \n"
128#elif defined(CONFIG_CPU_MIPSR2)
129 " ei \n"
130#else
131 " mfc0 $1,$12 \n"
132 " ori $1,0x1f \n"
133 " xori $1,0x1e \n"
134 " mtc0 $1,$12 \n"
135#endif
136 " irq_enable_hazard \n"
137 " .set pop \n"
138 " .endm");
139
140extern void smtc_ipi_replay(void);
141
142static inline void arch_local_irq_enable(void)
143{
144#ifdef CONFIG_MIPS_MT_SMTC
145 /*
146 * SMTC kernel needs to do a software replay of queued
147 * IPIs, at the cost of call overhead on each local_irq_enable()
148 */
149 smtc_ipi_replay();
150#endif
151 __asm__ __volatile__(
152 "arch_local_irq_enable"
153 : /* no outputs */
154 : /* no inputs */
155 : "memory");
156}
157
158
159__asm__(
160 " .macro arch_local_save_flags flags \n"
161 " .set push \n"
162 " .set reorder \n"
163#ifdef CONFIG_MIPS_MT_SMTC
164 " mfc0 \\flags, $2, 1 \n"
165#else
166 " mfc0 \\flags, $12 \n"
167#endif
168 " .set pop \n"
169 " .endm \n");
170
171static inline unsigned long arch_local_save_flags(void)
172{
173 unsigned long flags;
174 asm volatile("arch_local_save_flags %0" : "=r" (flags));
175 return flags;
176}
177
235 178
236static inline int arch_irqs_disabled_flags(unsigned long flags) 179static inline int arch_irqs_disabled_flags(unsigned long flags)
237{ 180{
@@ -245,7 +188,7 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
 #endif
 }
 
-#endif
+#endif /* #ifndef __ASSEMBLY__ */
 
 /*
  * Do the CPU's IRQ-state tracing from assembly code.
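Note on the change above: the inline fast paths are now compiled only when the CPU has atomic di/ei (CONFIG_CPU_MIPSR2) and SMTC is off; every other configuration merely declares arch_local_irq_disable(), arch_local_irq_save(), arch_local_irq_restore() and __arch_local_irq_restore(), with the definitions moved out of line into mips-atomic.c. The sketch below shows why they cannot stay in the header: the pre-R2 variant is a multi-instruction read-modify-write of the CP0 Status register, so it has to run under preempt_disable()/preempt_enable(), per the new comment in the diff. The body is illustrative only, assuming the same pre-R2 sequence shown in the removed macros; it is not the literal contents of mips-atomic.c from this commit.

/*
 * Illustrative sketch, not the literal mips-atomic.c from this commit.
 * Clearing Status.IE on pre-R2 cores takes four instructions
 * (mfc0/ori/xori/mtc0) with $1 (AT) as scratch; a preemption between
 * the mfc0 and the mtc0 could write back a stale Status value, so the
 * whole read-modify-write runs with preemption off -- something an
 * inline header version could not impose on every call site.
 */
#include <linux/preempt.h>

void arch_local_irq_disable(void)
{
	preempt_disable();

	__asm__ __volatile__(
	"	.set	push					\n"
	"	.set	noat					\n"
	"	mfc0	$1, $12					\n"
	"	ori	$1, 0x1f				\n"
	"	xori	$1, 0x1f				\n"
	"	.set	noreorder				\n"
	"	mtc0	$1, $12					\n"
	"	irq_disable_hazard				\n"
	"	.set	pop					\n"
	: /* no outputs */
	: /* no inputs */
	: "memory");

	preempt_enable();
}

On a CONFIG_CPU_MIPSR2 && !CONFIG_MIPS_MT_SMTC kernel none of this is needed: di/ei flip Status.IE in a single instruction, which is why those variants remain inline in the header.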