author    Luke Browning <lukebrowning@us.ibm.com>  2008-06-13 00:17:35 -0400
committer Jeremy Kerr <jk@ozlabs.org>              2008-06-16 00:35:01 -0400
commit    2c911a14b74fa9cf815a936f310e4fa85bee77ce (patch)
tree      a6c5953c1c453cbd9affe378e58629519d9c50d2 /arch/powerpc
parent    1f64643aa5f5a17f1723f7ea0f17b7a3a8f632b3 (diff)
powerpc/spufs: synchronize interaction between spu exception handling and time slicing
Time slicing can occur at the same time as spu exception handling,
resulting in the wakeup of the wrong thread. This change uses the spu's
register_lock to enforce synchronization between bind/unbind and spu
exception handling so that they are mutually exclusive.

Signed-off-by: Luke Browning <lukebrowning@us.ibm.com>
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
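The locking discipline here is the classic one for tearing down a callback
that an interrupt handler may be about to invoke: both sides take the same
per-spu lock, so the handler sees either a fully bound context or none at
all. Below is a minimal userspace sketch of that discipline, with a pthread
mutex standing in for the kernel spinlock; all names (fake_spu, fake_irq,
fake_unbind) are invented for illustration and are not the kernel's.

#include <pthread.h>
#include <stdio.h>

/* Stand-in for struct spu; only the fields the sketch needs. */
struct fake_spu {
        pthread_mutex_t register_lock;
        void (*stop_callback)(struct fake_spu *spu);
};

static void fake_stop_callback(struct fake_spu *spu)
{
        printf("waking the thread bound to this spu\n");
}

/* Exception path: the callback is read and run under the lock. */
static void fake_irq(struct fake_spu *spu)
{
        pthread_mutex_lock(&spu->register_lock);
        if (spu->stop_callback)         /* cannot change under us now */
                spu->stop_callback(spu);
        else
                printf("late fault ignored: no context bound\n");
        pthread_mutex_unlock(&spu->register_lock);
}

/* Time-slicing path: the context is torn down under the same lock. */
static void fake_unbind(struct fake_spu *spu)
{
        pthread_mutex_lock(&spu->register_lock);
        spu->stop_callback = NULL;
        pthread_mutex_unlock(&spu->register_lock);
}

int main(void)
{
        struct fake_spu spu = {
                .register_lock = PTHREAD_MUTEX_INITIALIZER,
                .stop_callback = fake_stop_callback,
        };

        fake_irq(&spu);         /* runs the bound context's callback */
        fake_unbind(&spu);      /* scheduler steals the spu */
        fake_irq(&spu);         /* no wrong-thread wakeup possible */
        return 0;
}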
Diffstat (limited to 'arch/powerpc')
 arch/powerpc/platforms/cell/spu_base.c    | 42
 arch/powerpc/platforms/cell/spufs/sched.c | 14
 2 files changed, 37 insertions(+), 19 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 96b5f0f1c11e..78f905bc6a42 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -219,15 +219,25 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
 extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
 static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
 {
+        int ret;
+
         pr_debug("%s, %lx, %lx\n", __func__, dsisr, ea);
 
-        /* Handle kernel space hash faults immediately.
-           User hash faults need to be deferred to process context. */
-        if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
-            && REGION_ID(ea) != USER_REGION_ID
-            && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
-                spu_restart_dma(spu);
-                return 0;
+        /*
+         * Handle kernel space hash faults immediately. User hash
+         * faults need to be deferred to process context.
+         */
+        if ((dsisr & MFC_DSISR_PTE_NOT_FOUND) &&
+            (REGION_ID(ea) != USER_REGION_ID)) {
+
+                spin_unlock(&spu->register_lock);
+                ret = hash_page(ea, _PAGE_PRESENT, 0x300);
+                spin_lock(&spu->register_lock);
+
+                if (!ret) {
+                        spu_restart_dma(spu);
+                        return 0;
+                }
         }
 
         spu->class_1_dar = ea;
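One subtlety in this hunk: hash_page() is now called with register_lock
dropped, presumably because fixing up a hash fault is too heavy to run
under a spinlock taken by the interrupt path. Anything observed before the
unlock must be treated as stale afterwards; only the return value is used.
A compilable sketch of that "drop the lock around a slow call, retake it,
act only on the result" shape, with all names hypothetical:

#include <pthread.h>

struct fake_spu {
        pthread_mutex_t register_lock;
};

/* Hypothetical stand-ins for hash_page() and spu_restart_dma(). */
static int slow_hash_fixup(unsigned long ea) { (void)ea; return 0; }
static void restart_dma(struct fake_spu *spu) { (void)spu; }

/* Called with spu->register_lock held; returns with it held again. */
static int fake_trap_data_map(struct fake_spu *spu, unsigned long ea)
{
        int ret;

        pthread_mutex_unlock(&spu->register_lock);
        ret = slow_hash_fixup(ea);      /* too heavy to run under the lock */
        pthread_mutex_lock(&spu->register_lock);

        /* Lock re-acquired: trust only ret, not pre-unlock state. */
        if (!ret) {
                restart_dma(spu);       /* fault fixed, resume the MFC */
                return 0;
        }
        return 1;       /* defer to process context, as the real code does */
}

int main(void)
{
        struct fake_spu spu = { .register_lock = PTHREAD_MUTEX_INITIALIZER };

        pthread_mutex_lock(&spu.register_lock); /* handler entry condition */
        fake_trap_data_map(&spu, 0x1000);
        pthread_mutex_unlock(&spu.register_lock);
        return 0;
}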
@@ -325,14 +335,12 @@ spu_irq_class_0(int irq, void *data)
 
         spu->class_0_pending |= stat;
         spu->class_0_dar = spu_mfc_dar_get(spu);
-        spin_unlock(&spu->register_lock);
-
         spu->stop_callback(spu, 0);
-
         spu->class_0_pending = 0;
         spu->class_0_dar = 0;
 
         spu_int_stat_clear(spu, 0, stat);
+        spin_unlock(&spu->register_lock);
 
         return IRQ_HANDLED;
 }
@@ -355,13 +363,12 @@ spu_irq_class_1(int irq, void *data)
         spu_mfc_dsisr_set(spu, 0ul);
         spu_int_stat_clear(spu, 1, stat);
 
-        if (stat & CLASS1_SEGMENT_FAULT_INTR)
-                __spu_trap_data_seg(spu, dar);
-
-        spin_unlock(&spu->register_lock);
         pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat,
                         dar, dsisr);
 
+        if (stat & CLASS1_SEGMENT_FAULT_INTR)
+                __spu_trap_data_seg(spu, dar);
+
         if (stat & CLASS1_STORAGE_FAULT_INTR)
                 __spu_trap_data_map(spu, dar, dsisr);
 
@@ -374,6 +381,8 @@ spu_irq_class_1(int irq, void *data)
         spu->class_1_dsisr = 0;
         spu->class_1_dar = 0;
 
+        spin_unlock(&spu->register_lock);
+
         return stat ? IRQ_HANDLED : IRQ_NONE;
 }
 
@@ -392,14 +401,12 @@ spu_irq_class_2(int irq, void *data)
         mask = spu_int_mask_get(spu, 2);
         /* ignore interrupts we're not waiting for */
         stat &= mask;
-
         /* mailbox interrupts are level triggered. mask them now before
          * acknowledging */
         if (stat & mailbox_intrs)
                 spu_int_mask_and(spu, 2, ~(stat & mailbox_intrs));
         /* acknowledge all interrupts before the callbacks */
         spu_int_stat_clear(spu, 2, stat);
-        spin_unlock(&spu->register_lock);
 
         pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);
 
@@ -419,6 +426,9 @@ spu_irq_class_2(int irq, void *data)
                 spu->wbox_callback(spu);
 
         spu->stats.class2_intr++;
+
+        spin_unlock(&spu->register_lock);
+
         return stat ? IRQ_HANDLED : IRQ_NONE;
 }
 
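Across all three handlers the change is the same: the unlock that used to
sit before the callbacks moves to the very end, so invoking the callback
and retiring the pending state form a single critical section with the
register reads above them. Roughly, for the class 0 case (a sketch with
invented names, pthreads again standing in for the spinlock):

#include <pthread.h>

struct fake_spu {
        pthread_mutex_t register_lock;
        unsigned long class_0_pending;
        void (*stop_callback)(struct fake_spu *spu, int irq);
};

/* Shape of the reworked class 0 handler: one critical section from
 * publishing the fault state to clearing it. */
static void fake_irq_class_0(struct fake_spu *spu, unsigned long stat)
{
        pthread_mutex_lock(&spu->register_lock);

        spu->class_0_pending |= stat;   /* publish the fault state... */
        if (spu->stop_callback)
                spu->stop_callback(spu, 0); /* ...let the context consume it... */
        spu->class_0_pending = 0;       /* ...and retire it, atomically
                                         * with respect to unbind */

        pthread_mutex_unlock(&spu->register_lock);
}

int main(void)
{
        struct fake_spu spu = { .register_lock = PTHREAD_MUTEX_INITIALIZER };

        fake_irq_class_0(&spu, 1);      /* no context bound: state still retired */
        return 0;
}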
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 745dd51ec37f..cd725670b1b5 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -230,19 +230,23 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
         ctx->stats.slb_flt_base = spu->stats.slb_flt;
         ctx->stats.class2_intr_base = spu->stats.class2_intr;
 
+        spu_associate_mm(spu, ctx->owner);
+
+        spin_lock_irq(&spu->register_lock);
         spu->ctx = ctx;
         spu->flags = 0;
         ctx->spu = spu;
         ctx->ops = &spu_hw_ops;
         spu->pid = current->pid;
         spu->tgid = current->tgid;
-        spu_associate_mm(spu, ctx->owner);
         spu->ibox_callback = spufs_ibox_callback;
         spu->wbox_callback = spufs_wbox_callback;
         spu->stop_callback = spufs_stop_callback;
         spu->mfc_callback = spufs_mfc_callback;
-        mb();
+        spin_unlock_irq(&spu->register_lock);
+
         spu_unmap_mappings(ctx);
+
         spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0);
         spu_restore(&ctx->csa, spu);
         spu->timestamp = jiffies;
@@ -423,18 +427,22 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
         spu_unmap_mappings(ctx);
         spu_save(&ctx->csa, spu);
         spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0);
+
+        spin_lock_irq(&spu->register_lock);
         spu->timestamp = jiffies;
         ctx->state = SPU_STATE_SAVED;
         spu->ibox_callback = NULL;
         spu->wbox_callback = NULL;
         spu->stop_callback = NULL;
         spu->mfc_callback = NULL;
-        spu_associate_mm(spu, NULL);
         spu->pid = 0;
         spu->tgid = 0;
         ctx->ops = &spu_backing_ops;
         spu->flags = 0;
         spu->ctx = NULL;
+        spin_unlock_irq(&spu->register_lock);
+
+        spu_associate_mm(spu, NULL);
 
         ctx->stats.slb_flt +=
                 (spu->stats.slb_flt - ctx->stats.slb_flt_base);
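On the scheduler side the lock is taken with spin_lock_irq() because
bind/unbind run in process context with interrupts enabled, while the
handlers above can use plain spin_lock() since they run with interrupts
already disabled. Note also that spu_associate_mm() is kept outside the
critical section in both paths, and that the lock now subsumes the job of
the mb() it replaces: the barrier only ordered the stores, it did not
exclude a concurrent reader. A condensed sketch of the bind side (invented
names, pthread mutex standing in for the spinlock):

#include <pthread.h>

struct fake_spu {
        pthread_mutex_t register_lock;
        void (*stop_callback)(struct fake_spu *spu, int irq);
        void *ctx;
};

/* Bind: mm association happens first, outside the lock; every pointer an
 * interrupt handler might follow flips inside one critical section. */
static void fake_bind(struct fake_spu *spu, void *ctx,
                      void (*cb)(struct fake_spu *, int))
{
        /* kernel: spu_associate_mm(spu, ctx->owner) precedes the lock */

        pthread_mutex_lock(&spu->register_lock); /* kernel: spin_lock_irq() */
        spu->ctx = ctx;
        spu->stop_callback = cb;
        pthread_mutex_unlock(&spu->register_lock);

        /* a handler now sees either the old context or the new one,
         * never a half-initialized mix */
}

static void fake_stop(struct fake_spu *spu, int irq) { (void)spu; (void)irq; }

int main(void)
{
        struct fake_spu spu = { .register_lock = PTHREAD_MUTEX_INITIALIZER };

        fake_bind(&spu, &spu /* placeholder ctx */, fake_stop);
        return 0;
}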