author | Jeremy Kerr <jk@ozlabs.org> | 2007-12-20 02:39:59 -0500
---|---|---
committer | Paul Mackerras <paulus@samba.org> | 2007-12-21 03:46:20 -0500
commit | d6ad39bc53521275d14fde86bfb94d9b2ddb7a08 (patch) |
tree | 07dcc592b343395cb7fbfb3053aa21103fb94352 /arch/powerpc/platforms/cell/spufs/run.c |
parent | 8af30675c3e7b945bbaf6f57b724f246e56eb209 (diff) |
[POWERPC] spufs: rework class 0 and 1 interrupt handling
Based on original patches from
Arnd Bergmann <arnd.bergman@de.ibm.com>; and
Luke Browning <lukebr@linux.vnet.ibm.com>
Currently, spu contexts need to be loaded to the SPU in order to take
class 0 and class 1 exceptions.
This change makes the actual interrupt-handlers much simpler (ie, they
simply record the exception information in the context save area), and
defers the handling code to the spufs_handle_class[01] functions, called
from spufs_run_spu.
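
To make the split concrete, here is a standalone userspace analogue of
the deferred-handling pattern (a sketch only: hypothetical names, with
pthread primitives standing in for the kernel's wait queue; nothing
below is the real spufs implementation). The "interrupt" side just
records what happened and wakes the waiter; the waiting thread does the
actual handling later, in its own context:

/*
 * Illustrative sketch of the deferred exception-handling pattern.
 * Field names loosely mirror the patch below; build with: cc -pthread
 */
#include <pthread.h>
#include <stdio.h>

struct spu_context_sketch {
	pthread_mutex_t lock;
	pthread_cond_t stop_wq;		/* stands in for ctx->stop_wq */
	unsigned long class_0_pending;	/* saved exception status */
	unsigned long dar;		/* saved fault address */
};

/* "Interrupt" side: record the exception arguments, then wake the
 * waiter.  Releasing the mutex orders the stores before the wake-up,
 * playing the role of the smp_wmb() in the real handler. */
static void stop_callback(struct spu_context_sketch *ctx,
			  unsigned long pending, unsigned long dar)
{
	pthread_mutex_lock(&ctx->lock);
	ctx->class_0_pending = pending;
	ctx->dar = dar;
	pthread_cond_broadcast(&ctx->stop_wq);
	pthread_mutex_unlock(&ctx->lock);
}

/* "Run loop" side: sleep until an exception has been recorded, then
 * handle it in process context, where sleeping and scheduling are OK. */
static void *run_loop(void *arg)
{
	struct spu_context_sketch *ctx = arg;

	pthread_mutex_lock(&ctx->lock);
	while (!ctx->class_0_pending)
		pthread_cond_wait(&ctx->stop_wq, &ctx->lock);
	printf("deferred handling: status %#lx, dar %#lx\n",
	       ctx->class_0_pending, ctx->dar);
	ctx->class_0_pending = 0;	/* consumed */
	pthread_mutex_unlock(&ctx->lock);
	return NULL;
}

int main(void)
{
	struct spu_context_sketch ctx = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.stop_wq = PTHREAD_COND_INITIALIZER,
	};
	pthread_t waiter;

	pthread_create(&waiter, NULL, run_loop, &ctx);
	stop_callback(&ctx, 0x1, 0x1000);	/* simulate a class 0 event */
	pthread_join(waiter, NULL);
	return 0;
}

The real callback runs from interrupt context and cannot sleep on a
lock, which is why the patch below publishes the stores to ctx->csa
with an explicit smp_wmb() before wake_up_all() instead.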
This should improve the concurrency of the spu scheduling leading to
greater SPU utilization when SPUs are overcommitted.
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/platforms/cell/spufs/run.c')
-rw-r--r-- | arch/powerpc/platforms/cell/spufs/run.c | 50
1 file changed, 34 insertions(+), 16 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index b3cc1dd72185..3b3de6c7ee5b 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -15,7 +15,30 @@ void spufs_stop_callback(struct spu *spu)
 {
 	struct spu_context *ctx = spu->ctx;
 
-	wake_up_all(&ctx->stop_wq);
+	/*
+	 * It should be impossible to preempt a context while an exception
+	 * is being processed, since the context switch code is specially
+	 * coded to deal with interrupts ... But, just in case, sanity check
+	 * the context pointer. It is OK to return doing nothing since
+	 * the exception will be regenerated when the context is resumed.
+	 */
+	if (ctx) {
+		/* Copy exception arguments into module specific structure */
+		ctx->csa.class_0_pending = spu->class_0_pending;
+		ctx->csa.dsisr = spu->dsisr;
+		ctx->csa.dar = spu->dar;
+
+		/* ensure that the exception status has hit memory before a
+		 * thread waiting on the context's stop queue is woken */
+		smp_wmb();
+
+		wake_up_all(&ctx->stop_wq);
+	}
+
+	/* Clear callback arguments from spu structure */
+	spu->class_0_pending = 0;
+	spu->dsisr = 0;
+	spu->dar = 0;
 }
 
 static inline int spu_stopped(struct spu_context *ctx, u32 *stat)
@@ -29,9 +52,9 @@ static inline int spu_stopped(struct spu_context *ctx, u32 *stat)
 	if (ctx->state != SPU_STATE_RUNNABLE ||
 	    test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
 		return 1;
-	pte_fault = spu->dsisr &
+	pte_fault = ctx->csa.dsisr &
 		    (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED);
-	return (!(*stat & SPU_STATUS_RUNNING) || pte_fault || spu->class_0_pending) ?
+	return (!(*stat & SPU_STATUS_RUNNING) || pte_fault || ctx->csa.class_0_pending) ?
 		1 : 0;
 }
 
@@ -287,18 +310,6 @@ static int spu_process_callback(struct spu_context *ctx)
 	return ret;
 }
 
-static inline int spu_process_events(struct spu_context *ctx)
-{
-	struct spu *spu = ctx->spu;
-	int ret = 0;
-
-	if (spu->class_0_pending)
-		ret = spu_irq_class_0_bottom(spu);
-	if (!ret && signal_pending(current))
-		ret = -ERESTARTSYS;
-	return ret;
-}
-
 long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
 {
 	int ret;
@@ -364,13 +375,20 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
 		if (ret)
 			break;
 
+		ret = spufs_handle_class0(ctx);
+		if (ret)
+			break;
+
 		if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
 			ret = spu_reacquire_runnable(ctx, npc, &status);
 			if (ret)
 				goto out2;
 			continue;
 		}
-		ret = spu_process_events(ctx);
+
+		if (signal_pending(current))
+			ret = -ERESTARTSYS;
+
 
 	} while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
 				     SPU_STATUS_STOPPED_BY_HALT |