author		Arnd Bergmann <arnd.bergmann@de.ibm.com>	2007-04-23 15:08:15 -0400
committer	Arnd Bergmann <arnd@klappe.arndb.de>	2007-04-23 15:18:55 -0400
commit		57dace2391ba10135e38457904121e7ef34d0c83 (patch)
tree		1be720be47bd6f1d929e9242b8a89a8f2e5fe61d /arch/powerpc/platforms/cell/spufs/run.c
parent		62c05d583ec016c40011462d5f03b072bfbd3dc7 (diff)
[POWERPC] spufs: make spu page faults not block scheduling
Until now, we have always entered the spu page fault handler
with a mutex for the spu context held. This has multiple
bad side-effects:
- it becomes impossible to suspend the context during
  page faults
- if an spu program attempts to access its own mmio
  areas through DMA, we get an immediate livelock when
  the nopage function tries to acquire the same mutex,
  as sketched below
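
A minimal userspace sketch of that second problem, with a plain
POSIX mutex standing in for the spu context mutex (all names here
are illustrative, none of this is spufs code); with a non-recursive
mutex the recursion shows up as an immediate deadlock:

#include <pthread.h>

static pthread_mutex_t ctx_mutex = PTHREAD_MUTEX_INITIALIZER;

/* stands in for the spufs nopage handler, entered from the fault path */
static void nopage(void)
{
	pthread_mutex_lock(&ctx_mutex);		/* already held: blocks forever */
	pthread_mutex_unlock(&ctx_mutex);
}

/* stands in for the old fault path, entered with the context mutex held */
static void run_fault_path(void)
{
	pthread_mutex_lock(&ctx_mutex);		/* taken by the run loop */
	nopage();	/* DMA touches the context's own mmio mapping */
	pthread_mutex_unlock(&ctx_mutex);	/* never reached */
}

int main(void)
{
	run_fault_path();
	return 0;
}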
This patch makes the page fault logic operate on a
struct spu_context instead of a struct spu, and moves it
from spu_base.c to a new file fault.c inside of spufs.
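
The call site is visible in the last hunk below: spufs_run_spu()
now calls spufs_handle_class1() with the context acquired and
leaves the run loop if it fails. The following is only a sketch of
the shape such a handler can take; the real body lives in the new
fault.c, and the ctx->csa.dar/ctx->csa.dsisr locations and the
resolve_fault() helper are assumptions for illustration:

/* sketch only, not the actual fault.c implementation */
int spufs_handle_class1(struct spu_context *ctx)
{
	u64 ea = ctx->csa.dar;		/* assumed location of the saved fault data */
	u64 dsisr = ctx->csa.dsisr;
	int ret;

	/* only act on translation faults, as the old inline check did */
	if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)))
		return 0;

	/*
	 * Drop the context mutex before resolving the fault, so the
	 * context can be suspended and nopage can take the mutex.
	 */
	spu_release(ctx);
	ret = resolve_fault(ctx, ea, dsisr);	/* hypothetical helper */
	spu_acquire(ctx);

	return ret;
}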
We now also need to copy the dar and dsisr contents
of the last fault into the saved context to have them
accessible in case we schedule out the context before
activating the page fault handler.
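
A sketch of that copy, assuming the fault registers are mirrored
into csa.dar/csa.dsisr fields and using a made-up function name;
the point is only that the data must live in the save area, not on
the spu, once the context can be scheduled out:

/* sketch: record the fault registers in the context save area */
static void spufs_save_fault_regs(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	ctx->csa.dar = spu->dar;	/* faulting effective address */
	ctx->csa.dsisr = spu->dsisr;	/* reason bits for the fault */
}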
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Diffstat (limited to 'arch/powerpc/platforms/cell/spufs/run.c')

 arch/powerpc/platforms/cell/spufs/run.c | 28 ++++------------------------
 1 file changed, 4 insertions(+), 24 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index 7df5202c9a90..1a8195bf75d5 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -18,27 +18,6 @@ void spufs_stop_callback(struct spu *spu)
 	wake_up_all(&ctx->stop_wq);
 }
 
-void spufs_dma_callback(struct spu *spu, int type)
-{
-	struct spu_context *ctx = spu->ctx;
-
-	if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) {
-		ctx->event_return |= type;
-		wake_up_all(&ctx->stop_wq);
-	} else {
-		switch (type) {
-		case SPE_EVENT_DMA_ALIGNMENT:
-		case SPE_EVENT_SPE_DATA_STORAGE:
-		case SPE_EVENT_INVALID_DMA:
-			force_sig(SIGBUS, /* info, */ current);
-			break;
-		case SPE_EVENT_SPE_ERROR:
-			force_sig(SIGILL, /* info */ current);
-			break;
-		}
-	}
-}
-
 static inline int spu_stopped(struct spu_context *ctx, u32 * stat)
 {
 	struct spu *spu;
@@ -294,11 +273,8 @@ int spu_process_callback(struct spu_context *ctx)
 static inline int spu_process_events(struct spu_context *ctx)
 {
 	struct spu *spu = ctx->spu;
-	u64 pte_fault = MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED;
 	int ret = 0;
 
-	if (spu->dsisr & pte_fault)
-		ret = spu_irq_class_1_bottom(spu);
 	if (spu->class_0_pending)
 		ret = spu_irq_class_0_bottom(spu);
 	if (!ret && signal_pending(current))
@@ -332,6 +308,10 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
 				break;
 			status &= ~SPU_STATUS_STOPPED_BY_STOP;
 		}
+		ret = spufs_handle_class1(ctx);
+		if (ret)
+			break;
+
 		if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
 			ret = spu_reacquire_runnable(ctx, npc, &status);
 			if (ret) {