author | Luke Browning <lukebr@linux.vnet.ibm.com> | 2008-04-27 14:41:55 -0400
committer | Jeremy Kerr <jk@ozlabs.org> | 2008-05-04 23:33:44 -0400
commit | f3d69e0507f84903059d456c5d19f10b2df3ac69
tree | f8aa4062bc1a0939d7bdb3a634c01869f2bb32a2 /arch/powerpc/platforms/cell
parent | 7a2142002f29a7b398c49da9bdec712dc57087c7
[POWERPC] spufs: fix concurrent delivery of class 0 & 1 exceptions
SPU class 0 & 1 exceptions may occur in parallel, so we may end up
overwriting csa.dsisr.
This change adds dedicated fields for each class to the spu and the spu
context so that fault data is not overwritten.
Signed-off-by: Luke Browning <lukebr@linux.vnet.ibm.com>
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
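The struct members that back these new per-class fields live outside arch/powerpc/platforms/cell, so they do not appear in the diffstat below. As a rough sketch of what "dedicated fields for each class" amounts to — the field names are taken from the accessors used in the hunks that follow, while the exact declarations and surrounding members are assumptions:

```c
/*
 * Sketch only, not part of this diffstat: per-class fault state as it
 * would plausibly appear in the spu structure and in the context save
 * area ("csa"). Field names match the accessors in the hunks below;
 * all other members are elided, and the declarations are assumed.
 */
struct spu {
	/* ... */
	u64 class_0_pending;
	u64 class_0_dar;
	u64 class_0_dsisr;
	u64 class_1_dar;
	u64 class_1_dsisr;	/* replaces the single shared dar/dsisr pair */
	/* ... */
};

struct spu_state {	/* referenced as ctx->csa in spufs */
	/* ... */
	u64 class_0_pending;
	u64 class_0_dar;
	u64 class_0_dsisr;
	u64 class_1_dar;
	u64 class_1_dsisr;
	/* ... */
};
```

With one pair of fields per class, a class 0 interrupt that fires while a class 1 data-storage fault is being reported can no longer clobber the DAR/DSISR that the page-fault path is about to read.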
Diffstat (limited to 'arch/powerpc/platforms/cell')
-rw-r--r-- | arch/powerpc/platforms/cell/spu_base.c | 27
-rw-r--r-- | arch/powerpc/platforms/cell/spufs/fault.c | 17
-rw-r--r-- | arch/powerpc/platforms/cell/spufs/run.c | 29
-rw-r--r-- | arch/powerpc/platforms/cell/spufs/spufs.h | 2
4 files changed, 49 insertions, 26 deletions
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 6bab44b7716b..b9ae675640d0 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -226,11 +226,13 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
 		return 0;
 	}
 
-	spu->class_0_pending = 0;
-	spu->dar = ea;
-	spu->dsisr = dsisr;
+	spu->class_1_dar = ea;
+	spu->class_1_dsisr = dsisr;
+
+	spu->stop_callback(spu, 1);
 
-	spu->stop_callback(spu);
+	spu->class_1_dar = 0;
+	spu->class_1_dsisr = 0;
 
 	return 0;
 }
@@ -318,11 +320,15 @@ spu_irq_class_0(int irq, void *data)
 	stat = spu_int_stat_get(spu, 0) & mask;
 
 	spu->class_0_pending |= stat;
-	spu->dsisr = spu_mfc_dsisr_get(spu);
-	spu->dar = spu_mfc_dar_get(spu);
+	spu->class_0_dsisr = spu_mfc_dsisr_get(spu);
+	spu->class_0_dar = spu_mfc_dar_get(spu);
 	spin_unlock(&spu->register_lock);
 
-	spu->stop_callback(spu);
+	spu->stop_callback(spu, 0);
+
+	spu->class_0_pending = 0;
+	spu->class_0_dsisr = 0;
+	spu->class_0_dar = 0;
 
 	spu_int_stat_clear(spu, 0, stat);
 
@@ -363,6 +369,9 @@ spu_irq_class_1(int irq, void *data)
 	if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR)
 		;
 
+	spu->class_1_dsisr = 0;
+	spu->class_1_dar = 0;
+
 	return stat ? IRQ_HANDLED : IRQ_NONE;
 }
 
@@ -396,10 +405,10 @@ spu_irq_class_2(int irq, void *data)
 		spu->ibox_callback(spu);
 
 	if (stat & CLASS2_SPU_STOP_INTR)
-		spu->stop_callback(spu);
+		spu->stop_callback(spu, 2);
 
 	if (stat & CLASS2_SPU_HALT_INTR)
-		spu->stop_callback(spu);
+		spu->stop_callback(spu, 2);
 
 	if (stat & CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR)
 		spu->mfc_callback(spu);
diff --git a/arch/powerpc/platforms/cell/spufs/fault.c b/arch/powerpc/platforms/cell/spufs/fault.c
index e46d300e21a5..f093a581ac74 100644
--- a/arch/powerpc/platforms/cell/spufs/fault.c
+++ b/arch/powerpc/platforms/cell/spufs/fault.c
@@ -83,13 +83,18 @@ int spufs_handle_class0(struct spu_context *ctx)
 		return 0;
 
 	if (stat & CLASS0_DMA_ALIGNMENT_INTR)
-		spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_DMA_ALIGNMENT);
+		spufs_handle_event(ctx, ctx->csa.class_0_dar,
+			SPE_EVENT_DMA_ALIGNMENT);
 
 	if (stat & CLASS0_INVALID_DMA_COMMAND_INTR)
-		spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_INVALID_DMA);
+		spufs_handle_event(ctx, ctx->csa.class_0_dar,
+			SPE_EVENT_INVALID_DMA);
 
 	if (stat & CLASS0_SPU_ERROR_INTR)
-		spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_SPE_ERROR);
+		spufs_handle_event(ctx, ctx->csa.class_0_dar,
+			SPE_EVENT_SPE_ERROR);
+
+	ctx->csa.class_0_pending = 0;
 
 	return -EIO;
 }
@@ -119,8 +124,8 @@ int spufs_handle_class1(struct spu_context *ctx)
 	 * in time, we can still expect to get the same fault
 	 * the immediately after the context restore.
	 */
-	ea = ctx->csa.dar;
-	dsisr = ctx->csa.dsisr;
+	ea = ctx->csa.class_1_dar;
+	dsisr = ctx->csa.class_1_dsisr;
 
 	if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)))
 		return 0;
@@ -158,7 +163,7 @@ int spufs_handle_class1(struct spu_context *ctx)
 	 * time slicing will not preempt the context while the page fault
 	 * handler is running. Context switch code removes mappings.
 	 */
-	ctx->csa.dar = ctx->csa.dsisr = 0;
+	ctx->csa.class_1_dar = ctx->csa.class_1_dsisr = 0;
 
 	/*
 	 * If we handled the fault successfully and are in runnable
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index e764a43b544e..b7493b865812 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -11,7 +11,7 @@
 #include "spufs.h"
 
 /* interrupt-level stop callback function. */
-void spufs_stop_callback(struct spu *spu)
+void spufs_stop_callback(struct spu *spu, int irq)
 {
 	struct spu_context *ctx = spu->ctx;
 
@@ -24,9 +24,19 @@ void spufs_stop_callback(struct spu *spu)
 	 */
 	if (ctx) {
 		/* Copy exception arguments into module specific structure */
-		ctx->csa.class_0_pending = spu->class_0_pending;
-		ctx->csa.dsisr = spu->dsisr;
-		ctx->csa.dar = spu->dar;
+		switch(irq) {
+		case 0 :
+			ctx->csa.class_0_pending = spu->class_0_pending;
+			ctx->csa.class_0_dsisr = spu->class_0_dsisr;
+			ctx->csa.class_0_dar = spu->class_0_dar;
+			break;
+		case 1 :
+			ctx->csa.class_1_dsisr = spu->class_1_dsisr;
+			ctx->csa.class_1_dar = spu->class_1_dar;
+			break;
+		case 2 :
+			break;
+		}
 
 		/* ensure that the exception status has hit memory before a
 		 * thread waiting on the context's stop queue is woken */
@@ -34,11 +44,6 @@ void spufs_stop_callback(struct spu *spu)
 
 		wake_up_all(&ctx->stop_wq);
 	}
-
-	/* Clear callback arguments from spu structure */
-	spu->class_0_pending = 0;
-	spu->dsisr = 0;
-	spu->dar = 0;
 }
 
 int spu_stopped(struct spu_context *ctx, u32 *stat)
@@ -56,7 +61,11 @@ int spu_stopped(struct spu_context *ctx, u32 *stat)
 	if (!(*stat & SPU_STATUS_RUNNING) && (*stat & stopped))
 		return 1;
 
-	dsisr = ctx->csa.dsisr;
+	dsisr = ctx->csa.class_0_dsisr;
+	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
+		return 1;
+
+	dsisr = ctx->csa.class_1_dsisr;
 	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
 		return 1;
 
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index dc3a215a6a22..454c277c1457 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -332,7 +332,7 @@ size_t spu_ibox_read(struct spu_context *ctx, u32 *data);
 /* irq callback funcs. */
 void spufs_ibox_callback(struct spu *spu);
 void spufs_wbox_callback(struct spu *spu);
-void spufs_stop_callback(struct spu *spu);
+void spufs_stop_callback(struct spu *spu, int irq);
 void spufs_mfc_callback(struct spu *spu);
 void spufs_dma_callback(struct spu *spu, int type);
 
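The prototype change above implies a matching change to the stop_callback pointer in the spu structure, which is also outside this diffstat; the sketch below is an assumption based on how the class 0, 1 and 2 handlers in spu_base.c now invoke it:

```c
/*
 * Assumed counterpart of the spufs.h prototype change above (declared
 * outside this diffstat): the callback now carries the interrupt class,
 * so spufs_stop_callback() can save only that class's fault data.
 */
struct spu {
	/* ... */
	void (*stop_callback)(struct spu *spu, int irq);
	/* ... */
};
```

This is the piece that ties the fix together: the class 0, 1 and 2 handlers call spu->stop_callback() with their class number, and the switch in spufs_stop_callback() copies only the matching fields into the csa, so concurrent class 0 and class 1 deliveries no longer overwrite each other's DSISR.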