diff options
author | Masato Noguchi <Masato.Noguchi@jp.sony.com> | 2006-06-19 14:33:33 -0400 |
---|---|---|
committer | Paul Mackerras <paulus@samba.org> | 2006-06-21 01:01:32 -0400 |
commit | ba723fe2b2facc8d45b53701fec39aa429596759 (patch) | |
tree | 637c2ed713cbfa712d4306f98a43656379e1c5ba /arch/powerpc/platforms | |
parent | 6e18b27bd0911d4d2495c6fdeeac0c047d915300 (diff) |
[POWERPC] spufs: clear class2 interrupt status before wakeup
The SPU interrupt status must be cleared before the interrupt is handled.
Otherwise, the kernel may drop some interrupt packets.
Currently, class 2 interrupts are handled like this:
1) call callback to wake up waiting process
2) mask raised mailbox interrupt
3) clear interrupt status
I changed the order to:
1) mask raised mailbox interrupt
2) clear interrupt status
3) call callback to wake up waiting process
Clearing the status before masking would cause spurious interrupts.
Thus, I think it is necessary to follow the steps in the order described above.
Signed-off-by: Masato Noguchi <Masato.Noguchi@jp.sony.com>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/platforms')
-rw-r--r-- | arch/powerpc/platforms/cell/spu_base.c | 78 |
1 files changed, 19 insertions, 59 deletions
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c index 249a0af4cc99..db82f503ba2c 100644 --- a/arch/powerpc/platforms/cell/spu_base.c +++ b/arch/powerpc/platforms/cell/spu_base.c | |||
@@ -140,55 +140,7 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr) | |||
140 | spu->dar = ea; | 140 | spu->dar = ea; |
141 | spu->dsisr = dsisr; | 141 | spu->dsisr = dsisr; |
142 | mb(); | 142 | mb(); |
143 | if (spu->stop_callback) | 143 | spu->stop_callback(spu); |
144 | spu->stop_callback(spu); | ||
145 | return 0; | ||
146 | } | ||
147 | |||
148 | static int __spu_trap_mailbox(struct spu *spu) | ||
149 | { | ||
150 | if (spu->ibox_callback) | ||
151 | spu->ibox_callback(spu); | ||
152 | |||
153 | /* atomically disable SPU mailbox interrupts */ | ||
154 | spin_lock(&spu->register_lock); | ||
155 | spu_int_mask_and(spu, 2, ~0x1); | ||
156 | spin_unlock(&spu->register_lock); | ||
157 | return 0; | ||
158 | } | ||
159 | |||
160 | static int __spu_trap_stop(struct spu *spu) | ||
161 | { | ||
162 | pr_debug("%s\n", __FUNCTION__); | ||
163 | if (spu->stop_callback) | ||
164 | spu->stop_callback(spu); | ||
165 | return 0; | ||
166 | } | ||
167 | |||
168 | static int __spu_trap_halt(struct spu *spu) | ||
169 | { | ||
170 | pr_debug("%s\n", __FUNCTION__); | ||
171 | if (spu->stop_callback) | ||
172 | spu->stop_callback(spu); | ||
173 | return 0; | ||
174 | } | ||
175 | |||
176 | static int __spu_trap_tag_group(struct spu *spu) | ||
177 | { | ||
178 | pr_debug("%s\n", __FUNCTION__); | ||
179 | spu->mfc_callback(spu); | ||
180 | return 0; | ||
181 | } | ||
182 | |||
183 | static int __spu_trap_spubox(struct spu *spu) | ||
184 | { | ||
185 | if (spu->wbox_callback) | ||
186 | spu->wbox_callback(spu); | ||
187 | |||
188 | /* atomically disable SPU mailbox interrupts */ | ||
189 | spin_lock(&spu->register_lock); | ||
190 | spu_int_mask_and(spu, 2, ~0x10); | ||
191 | spin_unlock(&spu->register_lock); | ||
192 | return 0; | 144 | return 0; |
193 | } | 145 | } |
194 | 146 | ||
@@ -199,8 +151,7 @@ spu_irq_class_0(int irq, void *data, struct pt_regs *regs) | |||
199 | 151 | ||
200 | spu = data; | 152 | spu = data; |
201 | spu->class_0_pending = 1; | 153 | spu->class_0_pending = 1; |
202 | if (spu->stop_callback) | 154 | spu->stop_callback(spu); |
203 | spu->stop_callback(spu); | ||
204 | 155 | ||
205 | return IRQ_HANDLED; | 156 | return IRQ_HANDLED; |
206 | } | 157 | } |
@@ -278,29 +229,38 @@ spu_irq_class_2(int irq, void *data, struct pt_regs *regs) | |||
278 | unsigned long mask; | 229 | unsigned long mask; |
279 | 230 | ||
280 | spu = data; | 231 | spu = data; |
232 | spin_lock(&spu->register_lock); | ||
281 | stat = spu_int_stat_get(spu, 2); | 233 | stat = spu_int_stat_get(spu, 2); |
282 | mask = spu_int_mask_get(spu, 2); | 234 | mask = spu_int_mask_get(spu, 2); |
235 | /* ignore interrupts we're not waiting for */ | ||
236 | stat &= mask; | ||
237 | /* | ||
238 | * mailbox interrupts (0x1 and 0x10) are level triggered. | ||
239 | * mask them now before acknowledging. | ||
240 | */ | ||
241 | if (stat & 0x11) | ||
242 | spu_int_mask_and(spu, 2, ~(stat & 0x11)); | ||
243 | /* acknowledge all interrupts before the callbacks */ | ||
244 | spu_int_stat_clear(spu, 2, stat); | ||
245 | spin_unlock(&spu->register_lock); | ||
283 | 246 | ||
284 | pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask); | 247 | pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask); |
285 | 248 | ||
286 | stat &= mask; | ||
287 | |||
288 | if (stat & 1) /* PPC core mailbox */ | 249 | if (stat & 1) /* PPC core mailbox */ |
289 | __spu_trap_mailbox(spu); | 250 | spu->ibox_callback(spu); |
290 | 251 | ||
291 | if (stat & 2) /* SPU stop-and-signal */ | 252 | if (stat & 2) /* SPU stop-and-signal */ |
292 | __spu_trap_stop(spu); | 253 | spu->stop_callback(spu); |
293 | 254 | ||
294 | if (stat & 4) /* SPU halted */ | 255 | if (stat & 4) /* SPU halted */ |
295 | __spu_trap_halt(spu); | 256 | spu->stop_callback(spu); |
296 | 257 | ||
297 | if (stat & 8) /* DMA tag group complete */ | 258 | if (stat & 8) /* DMA tag group complete */ |
298 | __spu_trap_tag_group(spu); | 259 | spu->mfc_callback(spu); |
299 | 260 | ||
300 | if (stat & 0x10) /* SPU mailbox threshold */ | 261 | if (stat & 0x10) /* SPU mailbox threshold */ |
301 | __spu_trap_spubox(spu); | 262 | spu->wbox_callback(spu); |
302 | 263 | ||
303 | spu_int_stat_clear(spu, 2, stat); | ||
304 | return stat ? IRQ_HANDLED : IRQ_NONE; | 264 | return stat ? IRQ_HANDLED : IRQ_NONE; |
305 | } | 265 | } |
306 | 266 | ||