Diffstat (limited to 'arch/powerpc')

 arch/powerpc/platforms/cell/spu_base.c    | 44 ++++++++++++++++++++++++++++++------------------
 arch/powerpc/platforms/cell/spufs/run.c   | 21 ++++++++++++---------
 arch/powerpc/platforms/cell/spufs/sched.c | 19 ++++++++++++++++---
 arch/powerpc/xmon/xmon.c                  |  1 -
 4 files changed, 54 insertions(+), 31 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 70c660121ec4..78f905bc6a42 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -219,15 +219,25 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
 extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
 static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
 {
+	int ret;
+
 	pr_debug("%s, %lx, %lx\n", __func__, dsisr, ea);
 
-	/* Handle kernel space hash faults immediately.
-	   User hash faults need to be deferred to process context. */
-	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
-	    && REGION_ID(ea) != USER_REGION_ID
-	    && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
-		spu_restart_dma(spu);
-		return 0;
+	/*
+	 * Handle kernel space hash faults immediately. User hash
+	 * faults need to be deferred to process context.
+	 */
+	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND) &&
+	    (REGION_ID(ea) != USER_REGION_ID)) {
+
+		spin_unlock(&spu->register_lock);
+		ret = hash_page(ea, _PAGE_PRESENT, 0x300);
+		spin_lock(&spu->register_lock);
+
+		if (!ret) {
+			spu_restart_dma(spu);
+			return 0;
+		}
 	}
 
 	spu->class_1_dar = ea;
@@ -324,17 +334,13 @@ spu_irq_class_0(int irq, void *data)
 	stat = spu_int_stat_get(spu, 0) & mask;
 
 	spu->class_0_pending |= stat;
-	spu->class_0_dsisr = spu_mfc_dsisr_get(spu);
 	spu->class_0_dar = spu_mfc_dar_get(spu);
-	spin_unlock(&spu->register_lock);
-
 	spu->stop_callback(spu, 0);
-
 	spu->class_0_pending = 0;
-	spu->class_0_dsisr = 0;
 	spu->class_0_dar = 0;
 
 	spu_int_stat_clear(spu, 0, stat);
+	spin_unlock(&spu->register_lock);
 
 	return IRQ_HANDLED;
 }
@@ -357,13 +363,12 @@ spu_irq_class_1(int irq, void *data)
 	spu_mfc_dsisr_set(spu, 0ul);
 	spu_int_stat_clear(spu, 1, stat);
 
-	if (stat & CLASS1_SEGMENT_FAULT_INTR)
-		__spu_trap_data_seg(spu, dar);
-
-	spin_unlock(&spu->register_lock);
 	pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat,
 			dar, dsisr);
 
+	if (stat & CLASS1_SEGMENT_FAULT_INTR)
+		__spu_trap_data_seg(spu, dar);
+
 	if (stat & CLASS1_STORAGE_FAULT_INTR)
 		__spu_trap_data_map(spu, dar, dsisr);
 
@@ -376,6 +381,8 @@ spu_irq_class_1(int irq, void *data)
 	spu->class_1_dsisr = 0;
 	spu->class_1_dar = 0;
 
+	spin_unlock(&spu->register_lock);
+
 	return stat ? IRQ_HANDLED : IRQ_NONE;
 }
 
@@ -394,14 +401,12 @@ spu_irq_class_2(int irq, void *data)
 	mask = spu_int_mask_get(spu, 2);
 	/* ignore interrupts we're not waiting for */
 	stat &= mask;
-
 	/* mailbox interrupts are level triggered. mask them now before
 	 * acknowledging */
 	if (stat & mailbox_intrs)
 		spu_int_mask_and(spu, 2, ~(stat & mailbox_intrs));
 	/* acknowledge all interrupts before the callbacks */
 	spu_int_stat_clear(spu, 2, stat);
-	spin_unlock(&spu->register_lock);
 
 	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);
 
@@ -421,6 +426,9 @@ spu_irq_class_2(int irq, void *data)
 		spu->wbox_callback(spu);
 
 	spu->stats.class2_intr++;
+
+	spin_unlock(&spu->register_lock);
+
 	return stat ? IRQ_HANDLED : IRQ_NONE;
 }
 
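The spu_base.c hunks above widen register_lock so that each class 0/1/2 interrupt handler runs under the lock from entry to exit, which means __spu_trap_data_map() is now entered with the lock already held. Because hash_page() may sleep, the lock is dropped around that single call and re-taken before the result is inspected. Below is a minimal userspace sketch of this drop-and-reacquire pattern, using a pthread mutex as a stand-in for the kernel spinlock; hash_page_stub() and handle_fault_locked() are hypothetical names, not kernel or spufs APIs:

    #include <pthread.h>

    static pthread_mutex_t register_lock = PTHREAD_MUTEX_INITIALIZER;

    /* hypothetical stand-in for hash_page(); assume it may block */
    static int hash_page_stub(unsigned long ea)
    {
            (void)ea;
            return 0;               /* 0 = fault resolved */
    }

    /* called with register_lock held, as the patched handler is */
    static int handle_fault_locked(unsigned long ea)
    {
            int ret;

            pthread_mutex_unlock(&register_lock);   /* must not block under the lock */
            ret = hash_page_stub(ea);
            pthread_mutex_lock(&register_lock);     /* restore the caller's locking state */

            return ret;
    }

    int main(void)
    {
            pthread_mutex_lock(&register_lock);
            handle_fault_locked(0x1000);
            pthread_mutex_unlock(&register_lock);
            return 0;
    }

The classic caveat of this pattern applies: other CPUs can take the lock while it is dropped, so any register state read before the unlock may be stale once the lock is re-acquired; only the return value of the unlocked call is trusted afterwards.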
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index b7493b865812..f7edba6cb795 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -27,7 +27,6 @@ void spufs_stop_callback(struct spu *spu, int irq)
 	switch(irq) {
 	case 0 :
 		ctx->csa.class_0_pending = spu->class_0_pending;
-		ctx->csa.class_0_dsisr = spu->class_0_dsisr;
 		ctx->csa.class_0_dar = spu->class_0_dar;
 		break;
 	case 1 :
@@ -51,18 +50,22 @@ int spu_stopped(struct spu_context *ctx, u32 *stat)
 	u64 dsisr;
 	u32 stopped;
 
-	*stat = ctx->ops->status_read(ctx);
-
-	if (test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
-		return 1;
-
 	stopped = SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
 		SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
-	if (!(*stat & SPU_STATUS_RUNNING) && (*stat & stopped))
+
+top:
+	*stat = ctx->ops->status_read(ctx);
+	if (*stat & stopped) {
+		/*
+		 * If the spu hasn't finished stopping, we need to
+		 * re-read the register to get the stopped value.
+		 */
+		if (*stat & SPU_STATUS_RUNNING)
+			goto top;
 		return 1;
+	}
 
-	dsisr = ctx->csa.class_0_dsisr;
-	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
+	if (test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
 		return 1;
 
 	dsisr = ctx->csa.class_1_dsisr;
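The reworked spu_stopped() closes a window where a stop reason is already latched in the SPU status word while SPU_STATUS_RUNNING is still set, i.e. the SPU has not finished stopping yet; in that case the register must be re-read until RUNNING clears. Here is a hedged, self-contained sketch of the same re-read loop against a generic status register; the bit values and read_status() are illustrative stand-ins for ctx->ops->status_read(), not spufs definitions:

    #include <stdio.h>

    #define STATUS_RUNNING   0x1u   /* illustrative: still executing */
    #define STATUS_STOP_MASK 0x2u   /* illustrative: any latched stop reason */

    /* fake register: the first read shows a stop reason latched while
     * still running, the second shows the device fully stopped */
    static unsigned int read_status(void)
    {
            static int reads;
            return reads++ ? STATUS_STOP_MASK
                           : (STATUS_STOP_MASK | STATUS_RUNNING);
    }

    static int is_stopped(unsigned int *stat)
    {
            for (;;) {
                    *stat = read_status();
                    if (!(*stat & STATUS_STOP_MASK))
                            return 0;       /* no stop reason latched */
                    if (!(*stat & STATUS_RUNNING))
                            return 1;       /* latched and fully stopped */
                    /* latched but still running: re-read */
            }
    }

    int main(void)
    {
            unsigned int stat;
            printf("stopped=%d stat=%#x\n", is_stopped(&stat), stat);
            return 0;
    }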
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 745dd51ec37f..e929e70a84e3 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -230,19 +230,23 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
 	ctx->stats.slb_flt_base = spu->stats.slb_flt;
 	ctx->stats.class2_intr_base = spu->stats.class2_intr;
 
+	spu_associate_mm(spu, ctx->owner);
+
+	spin_lock_irq(&spu->register_lock);
 	spu->ctx = ctx;
 	spu->flags = 0;
 	ctx->spu = spu;
 	ctx->ops = &spu_hw_ops;
 	spu->pid = current->pid;
 	spu->tgid = current->tgid;
-	spu_associate_mm(spu, ctx->owner);
 	spu->ibox_callback = spufs_ibox_callback;
 	spu->wbox_callback = spufs_wbox_callback;
 	spu->stop_callback = spufs_stop_callback;
 	spu->mfc_callback = spufs_mfc_callback;
-	mb();
+	spin_unlock_irq(&spu->register_lock);
+
 	spu_unmap_mappings(ctx);
+
 	spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0);
 	spu_restore(&ctx->csa, spu);
 	spu->timestamp = jiffies;
@@ -403,6 +407,8 @@ static int has_affinity(struct spu_context *ctx)
  */
 static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
 {
+	u32 status;
+
 	spu_context_trace(spu_unbind_context__enter, ctx, spu);
 
 	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
@@ -423,18 +429,22 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
 	spu_unmap_mappings(ctx);
 	spu_save(&ctx->csa, spu);
 	spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0);
+
+	spin_lock_irq(&spu->register_lock);
 	spu->timestamp = jiffies;
 	ctx->state = SPU_STATE_SAVED;
 	spu->ibox_callback = NULL;
 	spu->wbox_callback = NULL;
 	spu->stop_callback = NULL;
 	spu->mfc_callback = NULL;
-	spu_associate_mm(spu, NULL);
 	spu->pid = 0;
 	spu->tgid = 0;
 	ctx->ops = &spu_backing_ops;
 	spu->flags = 0;
 	spu->ctx = NULL;
+	spin_unlock_irq(&spu->register_lock);
+
+	spu_associate_mm(spu, NULL);
 
 	ctx->stats.slb_flt +=
 		(spu->stats.slb_flt - ctx->stats.slb_flt_base);
@@ -444,6 +454,9 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
 	/* This maps the underlying spu state to idle */
 	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
 	ctx->spu = NULL;
+
+	if (spu_stopped(ctx, &status))
+		wake_up_all(&ctx->stop_wq);
 }
 
 /**
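In sched.c the same lock now serializes binding and unbinding against the interrupt handlers: spu->ctx and the callback pointers are only published or cleared while register_lock is held (replacing the bare mb() in spu_bind_context()), and spu_associate_mm(), which can sleep, moves outside the locked region (before the lock on bind, after it on unbind). A minimal pthread analogue of that publish/unpublish ordering; every name below is illustrative, not the spufs API:

    #include <pthread.h>
    #include <stddef.h>

    struct dev {
            pthread_mutex_t register_lock;
            void (*stop_callback)(struct dev *d);
    };

    static struct dev global_dev = { PTHREAD_MUTEX_INITIALIZER, NULL };

    static void bind(struct dev *d, void (*cb)(struct dev *))
    {
            /* sleeping setup (the spu_associate_mm() analogue) happens
             * here, before the lock is taken */
            pthread_mutex_lock(&d->register_lock);
            d->stop_callback = cb;  /* a handler holding the lock sees
                                     * either the old or the new value */
            pthread_mutex_unlock(&d->register_lock);
    }

    static void unbind(struct dev *d)
    {
            pthread_mutex_lock(&d->register_lock);
            d->stop_callback = NULL;        /* once unlocked, no handler
                                             * can still call into it */
            pthread_mutex_unlock(&d->register_lock);
            /* sleeping teardown (the spu_associate_mm(spu, NULL)
             * analogue) happens here, after the lock is dropped */
    }

    static void noop(struct dev *d) { (void)d; }

    int main(void)
    {
            bind(&global_dev, noop);
            unbind(&global_dev);
            return 0;
    }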
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 1702de9395ee..bfcf70ee8959 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -2844,7 +2844,6 @@ static void dump_spu_fields(struct spu *spu)
 	DUMP_FIELD(spu, "0x%lx", flags);
 	DUMP_FIELD(spu, "%d", class_0_pending);
 	DUMP_FIELD(spu, "0x%lx", class_0_dar);
-	DUMP_FIELD(spu, "0x%lx", class_0_dsisr);
 	DUMP_FIELD(spu, "0x%lx", class_1_dar);
 	DUMP_FIELD(spu, "0x%lx", class_1_dsisr);
 	DUMP_FIELD(spu, "0x%lx", irqs[0]);