path: root/include/asm-mips/hazards.h
author    Ralf Baechle <ralf@linux-mips.org>    2006-03-13 11:16:29 -0500
committer Ralf Baechle <ralf@linux-mips.org>    2006-03-18 11:59:26 -0500
commit    a3c4946db4fe64cb21b66a09e89890678aac6d65 (patch)
tree      3b63d5e765af3eedbc1cda84135f1b702a43a6f2 /include/asm-mips/hazards.h
parent    3a2f735700332621274aca752be3b6f839fa47e7 (diff)
[MIPS] SB1: Fix interrupt disable hazard.
The SB1 core has a three cycle interrupt disable hazard, but we were
wrongly treating it as fully interlocked.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
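In concrete terms: on SB1 the mtc0 that clears Status.IE does not take
effect for three cycles, so a caller can still be interrupted after it
believes interrupts are off unless the hazard is covered. Below is a
minimal sketch of the consumer side, modelled on the kernel's
local_irq_disable() of this era; it is not part of the patch, and the
helper name is made up. The point is that the bare asm uses the
irq_disable_hazard assembler macro defined by this header, which after
this patch expands to three _ssnop's on SB1 instead of to nothing:

    #include <asm/hazards.h>    /* defines the irq_disable_hazard asm macro */

    /* Sketch only: disable interrupts, then cover the interrupt
     * disable hazard before relying on interrupts being off. */
    static inline void sketch_local_irq_disable(void)
    {
            __asm__ __volatile__(
            "       .set    push                                    \n"
            "       .set    reorder                                 \n"
            "       .set    noat                                    \n"
            "       mfc0    $1, $12         # read c0_status        \n"
            "       ori     $1, 1                                   \n"
            "       xori    $1, 1           # clear IE              \n"
            "       .set    noreorder                               \n"
            "       mtc0    $1, $12         # write c0_status       \n"
            "       irq_disable_hazard      # 3 cycles on SB1       \n"
            "       .set    pop                                     \n"
            : /* no outputs */
            : /* no inputs */
            : "memory");
    }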
Diffstat (limited to 'include/asm-mips/hazards.h')
 include/asm-mips/hazards.h | 180
 1 file changed, 103 insertions(+), 77 deletions(-)
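The other half of the change centralizes back_to_back_c0_hazard() (see
the new "Back-to-back hazards" block in the diff below): a move to a
cp0 register followed by a read of the same register needs a barrier in
between. A hedged usage sketch, assuming the read_c0_status() and
write_c0_status() accessors from <asm/mipsregs.h>; the helper name is
hypothetical:

    #include <asm/mipsregs.h>
    #include <asm/hazards.h>

    /* Sketch only: back_to_back_c0_hazard() is an ehb on MIPS R2,
     * nothing on R10000/RM9000/SB1, and three ssnops otherwise. */
    static inline unsigned int write_then_read_c0_status(unsigned int val)
    {
            write_c0_status(val);
            back_to_back_c0_hazard();
            return read_c0_status();
    }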
diff --git a/include/asm-mips/hazards.h b/include/asm-mips/hazards.h
index 6111a0ce58c4..feb29a793888 100644
--- a/include/asm-mips/hazards.h
+++ b/include/asm-mips/hazards.h
@@ -3,7 +3,9 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2003, 2004 Ralf Baechle
+ * Copyright (C) 2003, 2004 Ralf Baechle <ralf@linux-mips.org>
+ * Copyright (C) MIPS Technologies, Inc.
+ *   written by Ralf Baechle <ralf@linux-mips.org>
  */
 #ifndef _ASM_HAZARDS_H
 #define _ASM_HAZARDS_H
@@ -74,8 +76,7 @@
74#define irq_disable_hazard 76#define irq_disable_hazard
75 _ehb 77 _ehb
76 78
77#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000) || \ 79#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000)
78 defined(CONFIG_CPU_SB1)
79 80
80/* 81/*
81 * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer. 82 * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
@@ -99,13 +100,13 @@
 #else /* __ASSEMBLY__ */
 
 __asm__(
-        "       .macro  _ssnop                          \n\t"
-        "       sll     $0, $0, 1                       \n\t"
-        "       .endm                                   \n\t"
-        "                                               \n\t"
-        "       .macro  _ehb                            \n\t"
-        "       sll     $0, $0, 3                       \n\t"
-        "       .endm                                   \n\t");
+        "       .macro  _ssnop                          \n"
+        "       sll     $0, $0, 1                       \n"
+        "       .endm                                   \n"
+        "                                               \n"
+        "       .macro  _ehb                            \n"
+        "       sll     $0, $0, 3                       \n"
+        "       .endm                                   \n");
 
 #ifdef CONFIG_CPU_RM9000
 
@@ -117,17 +118,21 @@ __asm__(
 
 #define mtc0_tlbw_hazard() \
         __asm__ __volatile__( \
-        ".set\tmips32\n\t" \
-        "_ssnop; _ssnop; _ssnop; _ssnop\n\t" \
-        ".set\tmips0")
+        "       .set    mips32                          \n" \
+        "       _ssnop                                  \n" \
+        "       _ssnop                                  \n" \
+        "       _ssnop                                  \n" \
+        "       _ssnop                                  \n" \
+        "       .set    mips0                           \n")
 
 #define tlbw_use_hazard() \
         __asm__ __volatile__( \
-        ".set\tmips32\n\t" \
-        "_ssnop; _ssnop; _ssnop; _ssnop\n\t" \
-        ".set\tmips0")
-
-#define back_to_back_c0_hazard() do { } while (0)
+        "       .set    mips32                          \n" \
+        "       _ssnop                                  \n" \
+        "       _ssnop                                  \n" \
+        "       _ssnop                                  \n" \
+        "       _ssnop                                  \n" \
+        "       .set    mips0                           \n")
 
 #else
 
@@ -136,15 +141,25 @@ __asm__(
  */
 #define mtc0_tlbw_hazard() \
         __asm__ __volatile__( \
-        ".set noreorder\n\t" \
-        "nop; nop; nop; nop; nop; nop;\n\t" \
-        ".set reorder\n\t")
+        "       .set    noreorder                       \n" \
+        "       nop                                     \n" \
+        "       nop                                     \n" \
+        "       nop                                     \n" \
+        "       nop                                     \n" \
+        "       nop                                     \n" \
+        "       nop                                     \n" \
+        "       .set    reorder                         \n")
 
 #define tlbw_use_hazard() \
         __asm__ __volatile__( \
-        ".set noreorder\n\t" \
-        "nop; nop; nop; nop; nop; nop;\n\t" \
-        ".set reorder\n\t")
+        "       .set    noreorder                       \n" \
+        "       nop                                     \n" \
+        "       nop                                     \n" \
+        "       nop                                     \n" \
+        "       nop                                     \n" \
+        "       nop                                     \n" \
+        "       nop                                     \n" \
+        "       .set    reorder                         \n")
 
 #endif
 
@@ -156,49 +171,26 @@ __asm__(
 
 #ifdef CONFIG_CPU_MIPSR2
 
-__asm__(
-        "       .macro  irq_enable_hazard               \n\t"
-        "       _ehb                                    \n\t"
-        "       .endm                                   \n\t"
-        "                                               \n\t"
-        "       .macro  irq_disable_hazard              \n\t"
-        "       _ehb                                    \n\t"
-        "       .endm                                   \n\t"
-        "                                               \n\t"
-        "       .macro  back_to_back_c0_hazard          \n\t"
-        "       _ehb                                    \n\t"
-        "       .endm");
-
-#define irq_enable_hazard() \
-        __asm__ __volatile__( \
-        "irq_enable_hazard")
+__asm__("       .macro  irq_enable_hazard               \n"
+        "       _ehb                                    \n"
+        "       .endm                                   \n"
+        "                                               \n"
+        "       .macro  irq_disable_hazard              \n"
+        "       _ehb                                    \n"
+        "       .endm                                   \n");
 
-#define irq_disable_hazard() \
-        __asm__ __volatile__( \
-        "irq_disable_hazard")
-
-#define back_to_back_c0_hazard() \
-        __asm__ __volatile__( \
-        "back_to_back_c0_hazard")
-
-#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000) || \
-      defined(CONFIG_CPU_SB1)
+#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000)
 
 /*
  * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
  */
 
 __asm__(
-        "       .macro  irq_enable_hazard               \n\t"
-        "       .endm                                   \n\t"
-        "                                               \n\t"
-        "       .macro  irq_disable_hazard              \n\t"
-        "       .endm");
-
-#define irq_enable_hazard() do { } while (0)
-#define irq_disable_hazard() do { } while (0)
-
-#define back_to_back_c0_hazard() do { } while (0)
+        "       .macro  irq_enable_hazard               \n"
+        "       .endm                                   \n"
+        "                                               \n"
+        "       .macro  irq_disable_hazard              \n"
+        "       .endm                                   \n");
 
 #else
 
@@ -209,29 +201,63 @@ __asm__(
  */
 
 __asm__(
-        "       #                                       \n\t"
-        "       # There is a hazard but we do not care  \n\t"
-        "       #                                       \n\t"
-        "       .macro\tirq_enable_hazard               \n\t"
-        "       .endm                                   \n\t"
-        "                                               \n\t"
-        "       .macro\tirq_disable_hazard              \n\t"
-        "       _ssnop; _ssnop; _ssnop                  \n\t"
-        "       .endm");
+        "       #                                       \n"
+        "       # There is a hazard but we do not care  \n"
+        "       #                                       \n"
+        "       .macro\tirq_enable_hazard               \n"
+        "       .endm                                   \n"
+        "                                               \n"
+        "       .macro\tirq_disable_hazard              \n"
+        "       _ssnop                                  \n"
+        "       _ssnop                                  \n"
+        "       _ssnop                                  \n"
+        "       .endm                                   \n");
 
-#define irq_enable_hazard() do { } while (0)
+#endif
+
+#define irq_enable_hazard() \
+        __asm__ __volatile__("irq_enable_hazard")
 #define irq_disable_hazard() \
-        __asm__ __volatile__( \
-        "irq_disable_hazard")
+        __asm__ __volatile__("irq_disable_hazard")
 
-#define back_to_back_c0_hazard() \
-        __asm__ __volatile__( \
-        "       .set noreorder                          \n" \
-        "       nop; nop; nop                           \n" \
-        "       .set reorder                            \n")
+
+/*
+ * Back-to-back hazards -
+ *
+ * What is needed to separate a move to cp0 from a subsequent read from the
+ * same cp0 register?
+ */
+#ifdef CONFIG_CPU_MIPSR2
+
+__asm__("       .macro  back_to_back_c0_hazard          \n"
+        "       _ehb                                    \n"
+        "       .endm                                   \n");
+
+#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000) || \
+      defined(CONFIG_CPU_SB1)
+
+__asm__("       .macro  back_to_back_c0_hazard          \n"
+        "       .endm                                   \n");
+
+#else
+
+__asm__("       .macro  back_to_back_c0_hazard          \n"
+        "       .set    noreorder                       \n"
+        "       _ssnop                                  \n"
+        "       _ssnop                                  \n"
+        "       _ssnop                                  \n"
+        "       .set    reorder                         \n"
+        "       .endm");
 
 #endif
 
+#define back_to_back_c0_hazard() \
+        __asm__ __volatile__("back_to_back_c0_hazard")
+
+
+/*
+ * Instruction execution hazard
+ */
 #ifdef CONFIG_CPU_MIPSR2
 /*
  * gcc has a tradition of misscompiling the previous construct using the