path: root/arch
author     Arnd Bergmann <arnd@arndb.de>       2005-12-05 22:52:27 -0500
committer  Paul Mackerras <paulus@samba.org>   2006-01-08 22:53:01 -0500
commit     3a843d7cd30ab6815610d9d6aa66b56df0ee1228
tree       b344400a51bf794ec10c6a1fb788e1244969a00f /arch
parent     2a911f0bb73e67826062b7d073dd7367ca449724
[PATCH] spufs: fix mailbox polling
Handling mailbox interrupts was broken in multiple respects, the
combination of which was hiding the bugs most of the time.

- The ibox interrupt mask was open initially even though there are no
  waiters on a newly created SPU.
- Acknowledging the mailbox interrupt did not work because it is level
  triggered and the mailbox data is never retrieved from inside the
  interrupt handler.
- The interrupt handler delivered interrupts with a disabled mask if
  another interrupt is triggered for the same class but a different mask.
- The poll function did not enable the interrupt if it had not been
  enabled, so we might run into the poll timeout if none of the other
  bugs saved us and no signal was delivered.

We probably still have a similar problem with blocking read/write on
mailbox files, but that will result in extra wakeups in the worst case,
not in incorrect behaviour.

Signed-off-by: Arnd Bergmann <arndb@de.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
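Editor's note, not part of the patch: the last bullet describes the generic
poll ordering the diff below adopts. As a rough illustration only, the sketch
below shows that ordering for a hypothetical character device (all "mydev"
names are made up): register the wait queue first, then test the condition
under a lock, and if the requested event is not yet pending, acknowledge the
level-triggered interrupt and unmask it so the eventual event wakes the
poller instead of leaving it to hit the poll timeout.

/* Editor's sketch, hypothetical driver; mirrors the structure of the
 * spu_hw_mbox_stat_poll()/spufs_ibox_poll() code added by this patch. */
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

struct mydev {
        spinlock_t lock;
        wait_queue_head_t read_wq;      /* woken from the device irq handler */
        u32 status;                     /* nonzero when data can be read */
};

/* hypothetical helper: clear the pending (level-triggered) interrupt status,
 * then set the corresponding interrupt mask bit, analogous to the writes to
 * int_stat_class2_RW and int_mask_class2_RW in the patch below */
static void mydev_ack_and_unmask_irq(struct mydev *dev)
{
        /* device-specific register accesses go here */
}

static unsigned int mydev_poll(struct file *file, poll_table *wait)
{
        struct mydev *dev = file->private_data;
        unsigned int mask = 0;

        /* register the waitqueue before testing the condition so a wakeup
         * between the test and sleeping cannot be missed */
        poll_wait(file, &dev->read_wq, wait);

        spin_lock_irq(&dev->lock);
        if (dev->status)
                mask |= POLLIN | POLLRDNORM;
        else
                mydev_ack_and_unmask_irq(dev);  /* arm the wakeup path */
        spin_unlock_irq(&dev->lock);

        return mask;
}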
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/platforms/cell/spu_base.c            | 11
-rw-r--r--  arch/powerpc/platforms/cell/spufs/backing_ops.c   | 37
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c          | 24
-rw-r--r--  arch/powerpc/platforms/cell/spufs/hw_ops.c        | 41
-rw-r--r--  arch/powerpc/platforms/cell/spufs/spufs.h         |  2
-rw-r--r--  arch/powerpc/platforms/cell/spufs/switch.c        |  4
6 files changed, 95 insertions(+), 24 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 8abd4bd19665..f9da79eb3db0 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -202,12 +202,15 @@ spu_irq_class_0(int irq, void *data, struct pt_regs *regs)
 int
 spu_irq_class_0_bottom(struct spu *spu)
 {
-        unsigned long stat;
+        unsigned long stat, mask;
 
         spu->class_0_pending = 0;
 
+        mask = in_be64(&spu->priv1->int_mask_class0_RW);
         stat = in_be64(&spu->priv1->int_stat_class0_RW);
 
+        stat &= mask;
+
         if (stat & 1) /* invalid MFC DMA */
                 __spu_trap_invalid_dma(spu);
 
@@ -263,13 +266,15 @@ spu_irq_class_2(int irq, void *data, struct pt_regs *regs)
 {
         struct spu *spu;
         unsigned long stat;
+        unsigned long mask;
 
         spu = data;
         stat = in_be64(&spu->priv1->int_stat_class2_RW);
+        mask = in_be64(&spu->priv1->int_mask_class2_RW);
 
-        pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat,
-                in_be64(&spu->priv1->int_mask_class2_RW));
+        pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);
 
+        stat &= mask;
 
         if (stat & 1) /* PPC core mailbox */
                 __spu_trap_mailbox(spu);
diff --git a/arch/powerpc/platforms/cell/spufs/backing_ops.c b/arch/powerpc/platforms/cell/spufs/backing_ops.c
index 66567c109965..a5c489a53c61 100644
--- a/arch/powerpc/platforms/cell/spufs/backing_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/backing_ops.c
@@ -32,6 +32,7 @@
 #include <linux/smp_lock.h>
 #include <linux/stddef.h>
 #include <linux/unistd.h>
+#include <linux/poll.h>
 
 #include <asm/io.h>
 #include <asm/spu.h>
@@ -87,6 +88,41 @@ static u32 spu_backing_mbox_stat_read(struct spu_context *ctx)
         return ctx->csa.prob.mb_stat_R;
 }
 
+static unsigned int spu_backing_mbox_stat_poll(struct spu_context *ctx,
+                                          unsigned int events)
+{
+        int ret;
+        u32 stat;
+
+        ret = 0;
+        spin_lock_irq(&ctx->csa.register_lock);
+        stat = ctx->csa.prob.mb_stat_R;
+
+        /* if the requested event is there, return the poll
+           mask, otherwise enable the interrupt to get notified,
+           but first mark any pending interrupts as done so
+           we don't get woken up unnecessarily */
+
+        if (events & (POLLIN | POLLRDNORM)) {
+                if (stat & 0xff0000)
+                        ret |= POLLIN | POLLRDNORM;
+                else {
+                        ctx->csa.priv1.int_stat_class0_RW &= ~0x1;
+                        ctx->csa.priv1.int_mask_class2_RW |= 0x1;
+                }
+        }
+        if (events & (POLLOUT | POLLWRNORM)) {
+                if (stat & 0x00ff00)
+                        ret = POLLOUT | POLLWRNORM;
+                else {
+                        ctx->csa.priv1.int_stat_class0_RW &= ~0x10;
+                        ctx->csa.priv1.int_mask_class2_RW |= 0x10;
+                }
+        }
+        spin_unlock_irq(&ctx->csa.register_lock);
+        return ret;
+}
+
 static int spu_backing_ibox_read(struct spu_context *ctx, u32 * data)
 {
         int ret;
@@ -252,6 +288,7 @@ static void spu_backing_runcntl_stop(struct spu_context *ctx)
 struct spu_context_ops spu_backing_ops = {
         .mbox_read = spu_backing_mbox_read,
         .mbox_stat_read = spu_backing_mbox_stat_read,
+        .mbox_stat_poll = spu_backing_mbox_stat_poll,
         .ibox_read = spu_backing_ibox_read,
         .wbox_write = spu_backing_wbox_write,
         .signal1_read = spu_backing_signal1_read,
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index af5adc372224..9738de727f32 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -389,20 +389,13 @@ static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
 static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
 {
         struct spu_context *ctx = file->private_data;
-        u32 mbox_stat;
         unsigned int mask;
 
-        spu_acquire(ctx);
-
-        mbox_stat = ctx->ops->mbox_stat_read(ctx);
-
-        spu_release(ctx);
-
         poll_wait(file, &ctx->ibox_wq, wait);
 
-        mask = 0;
-        if (mbox_stat & 0xff0000)
-                mask |= POLLIN | POLLRDNORM;
+        spu_acquire(ctx);
+        mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
+        spu_release(ctx);
 
         return mask;
 }
@@ -494,18 +487,13 @@ static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
 static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
 {
         struct spu_context *ctx = file->private_data;
-        u32 mbox_stat;
         unsigned int mask;
 
-        spu_acquire(ctx);
-        mbox_stat = ctx->ops->mbox_stat_read(ctx);
-        spu_release(ctx);
-
         poll_wait(file, &ctx->wbox_wq, wait);
 
-        mask = 0;
-        if (mbox_stat & 0x00ff00)
-                mask = POLLOUT | POLLWRNORM;
+        spu_acquire(ctx);
+        mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
+        spu_release(ctx);
 
         return mask;
 }
diff --git a/arch/powerpc/platforms/cell/spufs/hw_ops.c b/arch/powerpc/platforms/cell/spufs/hw_ops.c
index 68812415ee29..9a53e29f9d7e 100644
--- a/arch/powerpc/platforms/cell/spufs/hw_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/hw_ops.c
@@ -24,7 +24,7 @@
 #include <linux/sched.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
-#include <linux/vmalloc.h>
+#include <linux/poll.h>
 #include <linux/smp.h>
 #include <linux/smp_lock.h>
 #include <linux/stddef.h>
@@ -58,6 +58,44 @@ static u32 spu_hw_mbox_stat_read(struct spu_context *ctx)
         return in_be32(&ctx->spu->problem->mb_stat_R);
 }
 
+static unsigned int spu_hw_mbox_stat_poll(struct spu_context *ctx,
+                                          unsigned int events)
+{
+        struct spu *spu = ctx->spu;
+        struct spu_priv1 __iomem *priv1 = spu->priv1;
+        int ret = 0;
+        u32 stat;
+
+        spin_lock_irq(&spu->register_lock);
+        stat = in_be32(&spu->problem->mb_stat_R);
+
+        /* if the requested event is there, return the poll
+           mask, otherwise enable the interrupt to get notified,
+           but first mark any pending interrupts as done so
+           we don't get woken up unnecessarily */
+
+        if (events & (POLLIN | POLLRDNORM)) {
+                if (stat & 0xff0000)
+                        ret |= POLLIN | POLLRDNORM;
+                else {
+                        out_be64(&priv1->int_stat_class2_RW, 0x1);
+                        out_be64(&priv1->int_mask_class2_RW,
+                                 in_be64(&priv1->int_mask_class2_RW) | 0x1);
+                }
+        }
+        if (events & (POLLOUT | POLLWRNORM)) {
+                if (stat & 0x00ff00)
+                        ret = POLLOUT | POLLWRNORM;
+                else {
+                        out_be64(&priv1->int_stat_class2_RW, 0x10);
+                        out_be64(&priv1->int_mask_class2_RW,
+                                 in_be64(&priv1->int_mask_class2_RW) | 0x10);
+                }
+        }
+        spin_unlock_irq(&spu->register_lock);
+        return ret;
+}
+
 static int spu_hw_ibox_read(struct spu_context *ctx, u32 * data)
 {
         struct spu *spu = ctx->spu;
@@ -204,6 +242,7 @@ static void spu_hw_runcntl_stop(struct spu_context *ctx)
 struct spu_context_ops spu_hw_ops = {
         .mbox_read = spu_hw_mbox_read,
         .mbox_stat_read = spu_hw_mbox_stat_read,
+        .mbox_stat_poll = spu_hw_mbox_stat_poll,
         .ibox_read = spu_hw_ibox_read,
         .wbox_write = spu_hw_wbox_write,
         .signal1_read = spu_hw_signal1_read,
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 5bb75f22f722..17cae5e5fdf5 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -66,6 +66,8 @@ struct spu_context {
 struct spu_context_ops {
         int (*mbox_read) (struct spu_context * ctx, u32 * data);
         u32(*mbox_stat_read) (struct spu_context * ctx);
+        unsigned int (*mbox_stat_poll)(struct spu_context *ctx,
+                                        unsigned int events);
         int (*ibox_read) (struct spu_context * ctx, u32 * data);
         int (*wbox_write) (struct spu_context * ctx, u32 data);
         u32(*signal1_read) (struct spu_context * ctx);
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index 51266257b0a5..010a9fe55ef8 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -2155,8 +2155,8 @@ static void init_priv1(struct spu_state *csa)
                         CLASS0_ENABLE_SPU_ERROR_INTR;
         csa->priv1.int_mask_class1_RW = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
                         CLASS1_ENABLE_STORAGE_FAULT_INTR;
-        csa->priv1.int_mask_class2_RW = CLASS2_ENABLE_MAILBOX_INTR |
-                        CLASS2_ENABLE_SPU_STOP_INTR | CLASS2_ENABLE_SPU_HALT_INTR;
+        csa->priv1.int_mask_class2_RW = CLASS2_ENABLE_SPU_STOP_INTR |
+                        CLASS2_ENABLE_SPU_HALT_INTR;
 }
 
 static void init_priv2(struct spu_state *csa)