author		Jeremy Kerr <jk@ozlabs.org>		2007-12-20 02:39:59 -0500
committer	Paul Mackerras <paulus@samba.org>	2007-12-21 03:46:20 -0500
commit		8af30675c3e7b945bbaf6f57b724f246e56eb209
tree		a883fbefe8d2a4dc8c8ef4855e1159c94bcf7c64 /arch
parent		c40aa4710479b5d9f0e1fdf71b151f4c3708e3eb
[POWERPC] spufs: use #defines for SPU class [012] exception status
Add a few #defines for the class 0, 1 and 2 interrupt status bits, and
use them instead of magic numbers when we're setting or checking for
these interrupts.
Also, add a #define for the class 2 mailbox threshold interrupt mask.
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/platforms/cell/spu_base.c		42
-rw-r--r--	arch/powerpc/platforms/cell/spufs/backing_ops.c	17
-rw-r--r--	arch/powerpc/platforms/cell/spufs/hw_ops.c	14
3 files changed, 40 insertions, 33 deletions
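The new constants themselves are added in a header outside arch/ (hence absent from this diffstat and the hunks below). As a sketch, with every value inferred from the magic number it replaces in this patch:

#define CLASS0_DMA_ALIGNMENT_INTR		0x1
#define CLASS0_INVALID_DMA_COMMAND_INTR		0x2
#define CLASS0_SPU_ERROR_INTR			0x4
#define CLASS0_INTR_MASK			0x7

#define CLASS1_SEGMENT_FAULT_INTR		0x1
#define CLASS1_STORAGE_FAULT_INTR		0x2
#define CLASS1_LS_COMPARE_SUSPEND_ON_GET_INTR	0x4
#define CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR	0x8

#define CLASS2_MAILBOX_INTR			0x1
#define CLASS2_SPU_STOP_INTR			0x2
#define CLASS2_SPU_HALT_INTR			0x4
#define CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR	0x8
#define CLASS2_MAILBOX_THRESHOLD_INTR		0x10

/* enable bits in the class 2 mask register mirror the status bits,
 * as the old code's reuse of 0x1 and 0x10 for both registers suggests */
#define CLASS2_ENABLE_MAILBOX_INTR		0x1
#define CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR	0x10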
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index f73263ba9841..a560277b3ad7 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -360,18 +360,18 @@ spu_irq_class_0_bottom(struct spu *spu)
         stat = spu->class_0_pending;
         spu->class_0_pending = 0;
 
-        if (stat & 1) /* invalid DMA alignment */
+        if (stat & CLASS0_DMA_ALIGNMENT_INTR)
                 __spu_trap_dma_align(spu);
 
-        if (stat & 2) /* invalid MFC DMA */
+        if (stat & CLASS0_INVALID_DMA_COMMAND_INTR)
                 __spu_trap_invalid_dma(spu);
 
-        if (stat & 4) /* error on SPU */
+        if (stat & CLASS0_SPU_ERROR_INTR)
                 __spu_trap_error(spu);
 
         spin_unlock_irqrestore(&spu->register_lock, flags);
 
-        return (stat & 0x7) ? -EIO : 0;
+        return (stat & CLASS0_INTR_MASK) ? -EIO : 0;
 }
 EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);
 
@@ -389,24 +389,23 @@ spu_irq_class_1(int irq, void *data)
         stat = spu_int_stat_get(spu, 1) & mask;
         dar = spu_mfc_dar_get(spu);
         dsisr = spu_mfc_dsisr_get(spu);
-        if (stat & 2) /* mapping fault */
+        if (stat & CLASS1_STORAGE_FAULT_INTR)
                 spu_mfc_dsisr_set(spu, 0ul);
         spu_int_stat_clear(spu, 1, stat);
         spin_unlock(&spu->register_lock);
         pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
                         dar, dsisr);
 
-        if (stat & 1) /* segment fault */
+        if (stat & CLASS1_SEGMENT_FAULT_INTR)
                 __spu_trap_data_seg(spu, dar);
 
-        if (stat & 2) { /* mapping fault */
+        if (stat & CLASS1_STORAGE_FAULT_INTR)
                 __spu_trap_data_map(spu, dar, dsisr);
-        }
 
-        if (stat & 4) /* ls compare & suspend on get */
+        if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_GET_INTR)
                 ;
 
-        if (stat & 8) /* ls compare & suspend on put */
+        if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR)
                 ;
 
         return stat ? IRQ_HANDLED : IRQ_NONE;
@@ -418,6 +417,8 @@ spu_irq_class_2(int irq, void *data)
         struct spu *spu;
         unsigned long stat;
         unsigned long mask;
+        const int mailbox_intrs =
+                CLASS2_MAILBOX_THRESHOLD_INTR | CLASS2_MAILBOX_INTR;
 
         spu = data;
         spin_lock(&spu->register_lock);
@@ -425,31 +426,30 @@ spu_irq_class_2(int irq, void *data)
         mask = spu_int_mask_get(spu, 2);
         /* ignore interrupts we're not waiting for */
         stat &= mask;
-        /*
-         * mailbox interrupts (0x1 and 0x10) are level triggered.
-         * mask them now before acknowledging.
-         */
-        if (stat & 0x11)
-                spu_int_mask_and(spu, 2, ~(stat & 0x11));
+
+        /* mailbox interrupts are level triggered. mask them now before
+         * acknowledging */
+        if (stat & mailbox_intrs)
+                spu_int_mask_and(spu, 2, ~(stat & mailbox_intrs));
         /* acknowledge all interrupts before the callbacks */
         spu_int_stat_clear(spu, 2, stat);
         spin_unlock(&spu->register_lock);
 
         pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);
 
-        if (stat & 1) /* PPC core mailbox */
+        if (stat & CLASS2_MAILBOX_INTR)
                 spu->ibox_callback(spu);
 
-        if (stat & 2) /* SPU stop-and-signal */
+        if (stat & CLASS2_SPU_STOP_INTR)
                 spu->stop_callback(spu);
 
-        if (stat & 4) /* SPU halted */
+        if (stat & CLASS2_SPU_HALT_INTR)
                 spu->stop_callback(spu);
 
-        if (stat & 8) /* DMA tag group complete */
+        if (stat & CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR)
                 spu->mfc_callback(spu);
 
-        if (stat & 0x10) /* SPU mailbox threshold */
+        if (stat & CLASS2_MAILBOX_THRESHOLD_INTR)
                 spu->wbox_callback(spu);
 
         spu->stats.class2_intr++;
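An aside on the reworked class 2 handler above: the mailbox interrupts are level triggered, so acknowledging the status bit while the mailbox condition still holds would re-raise the interrupt immediately. Below is a minimal user-space model of the mask-before-ack pattern (illustrative only; the register variables and bit values are stand-ins, not the real SPU interface):

/*
 * Illustrative only -- not from this commit. Models why spu_irq_class_2()
 * masks the level-triggered mailbox sources before acknowledging: masking
 * first means the still-asserted condition cannot re-raise the interrupt,
 * and the callback re-enables the source once the mailbox is drained.
 */
#include <stdio.h>

#define MAILBOX_INTR           0x1
#define MAILBOX_THRESHOLD_INTR 0x10

static unsigned long int_mask = MAILBOX_INTR | MAILBOX_THRESHOLD_INTR;
static unsigned long int_stat = MAILBOX_INTR;  /* mailbox currently full */

int main(void)
{
        const unsigned long mailbox_intrs =
                MAILBOX_THRESHOLD_INTR | MAILBOX_INTR;
        unsigned long stat = int_stat & int_mask;

        /* 1. mask the level-triggered sources before acknowledging */
        if (stat & mailbox_intrs)
                int_mask &= ~(stat & mailbox_intrs);

        /* 2. safe to ack: even though the mailbox is still full, the
         *    masked source cannot re-raise the interrupt */
        int_stat &= ~stat;

        /* 3. a callback would now drain the mailbox and re-enable the
         *    source by setting its bit in int_mask again */
        printf("stat=%#lx mask=%#lx\n", int_stat, int_mask);
        return 0;
}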
diff --git a/arch/powerpc/platforms/cell/spufs/backing_ops.c b/arch/powerpc/platforms/cell/spufs/backing_ops.c
index 97b2d5e587cc..d4495531e5b2 100644
--- a/arch/powerpc/platforms/cell/spufs/backing_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/backing_ops.c
@@ -106,16 +106,20 @@ static unsigned int spu_backing_mbox_stat_poll(struct spu_context *ctx,
                 if (stat & 0xff0000)
                         ret |= POLLIN | POLLRDNORM;
                 else {
-                        ctx->csa.priv1.int_stat_class2_RW &= ~0x1;
-                        ctx->csa.priv1.int_mask_class2_RW |= 0x1;
+                        ctx->csa.priv1.int_stat_class2_RW &=
+                                ~CLASS2_MAILBOX_INTR;
+                        ctx->csa.priv1.int_mask_class2_RW |=
+                                CLASS2_ENABLE_MAILBOX_INTR;
                 }
         }
         if (events & (POLLOUT | POLLWRNORM)) {
                 if (stat & 0x00ff00)
                         ret = POLLOUT | POLLWRNORM;
                 else {
-                        ctx->csa.priv1.int_stat_class2_RW &= ~0x10;
-                        ctx->csa.priv1.int_mask_class2_RW |= 0x10;
+                        ctx->csa.priv1.int_stat_class2_RW &=
+                                ~CLASS2_MAILBOX_THRESHOLD_INTR;
+                        ctx->csa.priv1.int_mask_class2_RW |=
+                                CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR;
                 }
         }
         spin_unlock_irq(&ctx->csa.register_lock);
@@ -139,7 +143,7 @@ static int spu_backing_ibox_read(struct spu_context *ctx, u32 * data)
                 ret = 4;
         } else {
                 /* make sure we get woken up by the interrupt */
-                ctx->csa.priv1.int_mask_class2_RW |= 0x1UL;
+                ctx->csa.priv1.int_mask_class2_RW |= CLASS2_ENABLE_MAILBOX_INTR;
                 ret = 0;
         }
         spin_unlock(&ctx->csa.register_lock);
@@ -169,7 +173,8 @@ static int spu_backing_wbox_write(struct spu_context *ctx, u32 data)
         } else {
                 /* make sure we get woken up by the interrupt when space
                    becomes available */
-                ctx->csa.priv1.int_mask_class2_RW |= 0x10;
+                ctx->csa.priv1.int_mask_class2_RW |=
+                        CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR;
                 ret = 0;
         }
         spin_unlock(&ctx->csa.register_lock);
diff --git a/arch/powerpc/platforms/cell/spufs/hw_ops.c b/arch/powerpc/platforms/cell/spufs/hw_ops.c
index a7767e3b0853..64f8540b832c 100644
--- a/arch/powerpc/platforms/cell/spufs/hw_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/hw_ops.c
@@ -76,16 +76,18 @@ static unsigned int spu_hw_mbox_stat_poll(struct spu_context *ctx,
                 if (stat & 0xff0000)
                         ret |= POLLIN | POLLRDNORM;
                 else {
-                        spu_int_stat_clear(spu, 2, 0x1);
-                        spu_int_mask_or(spu, 2, 0x1);
+                        spu_int_stat_clear(spu, 2, CLASS2_MAILBOX_INTR);
+                        spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
                 }
         }
         if (events & (POLLOUT | POLLWRNORM)) {
                 if (stat & 0x00ff00)
                         ret = POLLOUT | POLLWRNORM;
                 else {
-                        spu_int_stat_clear(spu, 2, 0x10);
-                        spu_int_mask_or(spu, 2, 0x10);
+                        spu_int_stat_clear(spu, 2,
+                                CLASS2_MAILBOX_THRESHOLD_INTR);
+                        spu_int_mask_or(spu, 2,
+                                CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR);
                 }
         }
         spin_unlock_irq(&spu->register_lock);
@@ -106,7 +108,7 @@ static int spu_hw_ibox_read(struct spu_context *ctx, u32 * data)
                 ret = 4;
         } else {
                 /* make sure we get woken up by the interrupt */
-                spu_int_mask_or(spu, 2, 0x1);
+                spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
                 ret = 0;
         }
         spin_unlock_irq(&spu->register_lock);
@@ -127,7 +129,7 @@ static int spu_hw_wbox_write(struct spu_context *ctx, u32 data)
         } else {
                 /* make sure we get woken up by the interrupt when space
                    becomes available */
-                spu_int_mask_or(spu, 2, 0x10);
+                spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR);
                 ret = 0;
         }
         spin_unlock_irq(&spu->register_lock);