path: root/arch/powerpc/platforms/cell/spufs
author	Arnd Bergmann <arnd.bergmann@de.ibm.com>	2007-04-23 15:08:15 -0400
committer	Arnd Bergmann <arnd@klappe.arndb.de>	2007-04-23 15:18:55 -0400
commit	57dace2391ba10135e38457904121e7ef34d0c83 (patch)
tree	1be720be47bd6f1d929e9242b8a89a8f2e5fe61d /arch/powerpc/platforms/cell/spufs
parent	62c05d583ec016c40011462d5f03b072bfbd3dc7 (diff)
[POWERPC] spufs: make spu page faults not block scheduling
Until now, we have always entered the spu page fault handler
with a mutex for the spu context held. This has multiple
bad side-effects:

- it becomes impossible to suspend the context during
  page faults
- if an spu program attempts to access its own mmio
  areas through DMA, we get an immediate livelock when
  the nopage function tries to acquire the same mutex

This patch makes the page fault logic operate on a
struct spu_context instead of a struct spu, and moves it
from spu_base.c to a new file fault.c inside of spufs.

We now also need to copy the dar and dsisr contents
of the last fault into the saved context to have it
accessible in case we schedule out the context before
activating the page fault handler.

Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
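[Editor's note] For readers unfamiliar with the livelock described above: the context mutex is not recursive, so re-entering the fault path while holding it blocks forever against the same owner. The following userspace sketch is an illustration only, not part of the patch; it uses pthreads in place of kernel mutexes, and an error-checking mutex so the failure reports EDEADLK instead of hanging:

/*
 * Illustration only -- a userspace analogy of the livelock: the fault
 * path tries to take a mutex that the caller already holds.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ctx_lock;

/* stands in for the nopage/fault handler that also needs the lock */
static int fault_handler(void)
{
	int ret = pthread_mutex_lock(&ctx_lock);

	if (ret)
		return ret;	/* EDEADLK: we already own the lock */
	pthread_mutex_unlock(&ctx_lock);
	return 0;
}

int main(void)
{
	pthread_mutexattr_t attr;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
	pthread_mutex_init(&ctx_lock, &attr);

	/* old behaviour: fault entered with the context lock held */
	pthread_mutex_lock(&ctx_lock);
	printf("fault with lock held: %s\n",
	       fault_handler() == EDEADLK ? "deadlock" : "ok");

	/* new behaviour: drop the lock around the fault, retake it after */
	pthread_mutex_unlock(&ctx_lock);
	printf("fault with lock dropped: %s\n",
	       fault_handler() == 0 ? "ok" : "error");
	pthread_mutex_lock(&ctx_lock);

	pthread_mutex_unlock(&ctx_lock);
	pthread_mutex_destroy(&ctx_lock);
	pthread_mutexattr_destroy(&attr);
	return 0;
}

Compile with cc -pthread. With a default (non-error-checking) mutex, the first fault_handler() call would simply block on itself forever -- the kernel-side livelock that motivated this change.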
Diffstat (limited to 'arch/powerpc/platforms/cell/spufs')
-rw-r--r--	arch/powerpc/platforms/cell/spufs/Makefile	2
-rw-r--r--	arch/powerpc/platforms/cell/spufs/backing_ops.c	6
-rw-r--r--	arch/powerpc/platforms/cell/spufs/fault.c	193
-rw-r--r--	arch/powerpc/platforms/cell/spufs/hw_ops.c	9
-rw-r--r--	arch/powerpc/platforms/cell/spufs/run.c	28
-rw-r--r--	arch/powerpc/platforms/cell/spufs/spufs.h	4
-rw-r--r--	arch/powerpc/platforms/cell/spufs/switch.c	8
7 files changed, 223 insertions(+), 27 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spufs/Makefile b/arch/powerpc/platforms/cell/spufs/Makefile
index 472217d19fa..2cd89c11af5 100644
--- a/arch/powerpc/platforms/cell/spufs/Makefile
+++ b/arch/powerpc/platforms/cell/spufs/Makefile
@@ -1,4 +1,4 @@
-obj-y += switch.o
+obj-y += switch.o fault.o
 
 obj-$(CONFIG_SPU_FS) += spufs.o
 spufs-y += inode.o file.o context.o syscalls.o coredump.o
diff --git a/arch/powerpc/platforms/cell/spufs/backing_ops.c b/arch/powerpc/platforms/cell/spufs/backing_ops.c
index 1898f0d3a8b..3322528fa6e 100644
--- a/arch/powerpc/platforms/cell/spufs/backing_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/backing_ops.c
@@ -350,6 +350,11 @@ static int spu_backing_send_mfc_command(struct spu_context *ctx,
 	return ret;
 }
 
+static void spu_backing_restart_dma(struct spu_context *ctx)
+{
+	/* nothing to do here */
+}
+
 struct spu_context_ops spu_backing_ops = {
 	.mbox_read = spu_backing_mbox_read,
 	.mbox_stat_read = spu_backing_mbox_stat_read,
@@ -376,4 +381,5 @@ struct spu_context_ops spu_backing_ops = {
 	.read_mfc_tagstatus = spu_backing_read_mfc_tagstatus,
 	.get_mfc_free_elements = spu_backing_get_mfc_free_elements,
 	.send_mfc_command = spu_backing_send_mfc_command,
+	.restart_dma = spu_backing_restart_dma,
 };
diff --git a/arch/powerpc/platforms/cell/spufs/fault.c b/arch/powerpc/platforms/cell/spufs/fault.c
new file mode 100644
index 00000000000..182dc914cbc
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/fault.c
@@ -0,0 +1,193 @@
+/*
+ * Low-level SPU handling
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * Author: Arnd Bergmann <arndb@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+
+#include <asm/spu.h>
+#include <asm/spu_csa.h>
+
+#include "spufs.h"
+
+/*
+ * This ought to be kept in sync with the powerpc specific do_page_fault
+ * function. Fortunately, there are a few corner cases that we have not
+ * had to handle so far.
+ */
+static int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea, unsigned long dsisr)
+{
+	struct vm_area_struct *vma;
+	unsigned long is_write;
+	int ret;
+
+#if 0
+	if (!IS_VALID_EA(ea)) {
+		return -EFAULT;
+	}
+#endif /* XXX */
+	if (mm == NULL) {
+		return -EFAULT;
+	}
+	if (mm->pgd == NULL) {
+		return -EFAULT;
+	}
+
+	down_read(&mm->mmap_sem);
+	vma = find_vma(mm, ea);
+	if (!vma)
+		goto bad_area;
+	if (vma->vm_start <= ea)
+		goto good_area;
+	if (!(vma->vm_flags & VM_GROWSDOWN))
+		goto bad_area;
+	if (expand_stack(vma, ea))
+		goto bad_area;
+good_area:
+	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
+	if (is_write) {
+		if (!(vma->vm_flags & VM_WRITE))
+			goto bad_area;
+	} else {
+		if (dsisr & MFC_DSISR_ACCESS_DENIED)
+			goto bad_area;
+		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+			goto bad_area;
+	}
+	ret = 0;
+	switch (handle_mm_fault(mm, vma, ea, is_write)) {
+	case VM_FAULT_MINOR:
+		current->min_flt++;
+		break;
+	case VM_FAULT_MAJOR:
+		current->maj_flt++;
+		break;
+	case VM_FAULT_SIGBUS:
+		ret = -EFAULT;
+		goto bad_area;
+	case VM_FAULT_OOM:
+		ret = -ENOMEM;
+		goto bad_area;
+	default:
+		BUG();
+	}
+	up_read(&mm->mmap_sem);
+	return ret;
+
+bad_area:
+	up_read(&mm->mmap_sem);
+	return -EFAULT;
+}
+
+static void spufs_handle_dma_error(struct spu_context *ctx, int type)
+{
+	if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) {
+		ctx->event_return |= type;
+		wake_up_all(&ctx->stop_wq);
+	} else {
+		switch (type) {
+		case SPE_EVENT_DMA_ALIGNMENT:
+		case SPE_EVENT_SPE_DATA_STORAGE:
+		case SPE_EVENT_INVALID_DMA:
+			force_sig(SIGBUS, /* info, */ current);
+			break;
+		case SPE_EVENT_SPE_ERROR:
+			force_sig(SIGILL, /* info */ current);
+			break;
+		}
+	}
+}
+
+void spufs_dma_callback(struct spu *spu, int type)
+{
+	spufs_handle_dma_error(spu->ctx, type);
+}
+EXPORT_SYMBOL_GPL(spufs_dma_callback);
+
+/*
+ * Bottom half handler for page faults; we can't do this from
+ * interrupt context, since we might need to sleep.
+ * We also need to give up the mutex so we can get scheduled
+ * out while waiting for the backing store.
+ *
+ * TODO: try calling hash_page from the interrupt handler first
+ * in order to speed up the easy case.
+ */
+int spufs_handle_class1(struct spu_context *ctx)
+{
+	u64 ea, dsisr, access;
+	unsigned long flags;
+	int ret;
+
+	/*
+	 * dar and dsisr get passed from the registers
+	 * to the spu_context, to this function, but not
+	 * back to the spu if it gets scheduled again.
+	 *
+	 * If we don't handle the fault for a saved context
+	 * in time, we can still expect to get the same fault
+	 * again immediately after the context restore.
+	 */
+	if (ctx->state == SPU_STATE_RUNNABLE) {
+		ea = ctx->spu->dar;
+		dsisr = ctx->spu->dsisr;
+		ctx->spu->dar = ctx->spu->dsisr = 0;
+	} else {
+		ea = ctx->csa.priv1.mfc_dar_RW;
+		dsisr = ctx->csa.priv1.mfc_dsisr_RW;
+		ctx->csa.priv1.mfc_dar_RW = 0;
+		ctx->csa.priv1.mfc_dsisr_RW = 0;
+	}
+
+	if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)))
+		return 0;
+
+	pr_debug("ctx %p: ea %016lx, dsisr %016lx state %d\n", ctx, ea,
+		dsisr, ctx->state);
+
+	/* we must not hold the lock when entering spu_handle_mm_fault */
+	spu_release(ctx);
+
+	access = (_PAGE_PRESENT | _PAGE_USER);
+	access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
+	local_irq_save(flags);
+	ret = hash_page(ea, access, 0x300);
+	local_irq_restore(flags);
+
+	/* hashing failed, so try the actual fault handler */
+	if (ret)
+		ret = spu_handle_mm_fault(current->mm, ea, dsisr);
+
+	spu_acquire(ctx);
+	/*
+	 * If we handled the fault successfully and are in runnable
+	 * state, restart the DMA.
+	 * In case of an unhandled error, report the problem to user space.
+	 */
+	if (!ret) {
+		if (ctx->spu)
+			ctx->ops->restart_dma(ctx);
+	} else
+		spufs_handle_dma_error(ctx, SPE_EVENT_SPE_DATA_STORAGE);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(spufs_handle_class1);
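[Editor's note] Note the locking contract implied by the code above: spufs_handle_class1() must be entered with the context mutex held and returns with it held, but drops it across the fault, so the context may have been scheduled out in the meantime. A hypothetical caller sketch (not from the patch; the real call site is in the run.c hunk below, and npc/status stand in for that caller's local state) therefore has to revalidate the context state afterwards:

	spu_acquire(ctx);
	/* ... run the SPU, wait for a stop-and-signal or a fault ... */
	ret = spufs_handle_class1(ctx);	/* may sleep; drops and retakes the mutex */
	if (!ret && ctx->state != SPU_STATE_RUNNABLE)
		ret = spu_reacquire_runnable(ctx, npc, &status);	/* scheduled out while faulting */
	spu_release(ctx);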
diff --git a/arch/powerpc/platforms/cell/spufs/hw_ops.c b/arch/powerpc/platforms/cell/spufs/hw_ops.c
index ae42e03b8c8..428875c5e4e 100644
--- a/arch/powerpc/platforms/cell/spufs/hw_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/hw_ops.c
@@ -296,6 +296,14 @@ static int spu_hw_send_mfc_command(struct spu_context *ctx,
 	}
 }
 
+static void spu_hw_restart_dma(struct spu_context *ctx)
+{
+	struct spu_priv2 __iomem *priv2 = ctx->spu->priv2;
+
+	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &ctx->spu->flags))
+		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
+}
+
 struct spu_context_ops spu_hw_ops = {
 	.mbox_read = spu_hw_mbox_read,
 	.mbox_stat_read = spu_hw_mbox_stat_read,
@@ -320,4 +328,5 @@ struct spu_context_ops spu_hw_ops = {
 	.read_mfc_tagstatus = spu_hw_read_mfc_tagstatus,
 	.get_mfc_free_elements = spu_hw_get_mfc_free_elements,
 	.send_mfc_command = spu_hw_send_mfc_command,
+	.restart_dma = spu_hw_restart_dma,
 };
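[Editor's note] On the two restart_dma implementations (an inference from the hunks, not stated in the commit message): only a context that is currently loaded on an SPU has an MFC to poke, which is why spu_backing_restart_dma() is empty and why fault.c guards the dispatch through the ops table:

	if (!ret) {
		if (ctx->spu)	/* loaded: spu_hw_restart_dma writes MFC_CNTL_RESTART_DMA_COMMAND */
			ctx->ops->restart_dma(ctx);
	}

A saved context presumably has its suspended DMA restarted as part of the context-restore sequence instead, so the backing variant has nothing to do.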
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index 7df5202c9a9..1a8195bf75d 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -18,27 +18,6 @@ void spufs_stop_callback(struct spu *spu)
 	wake_up_all(&ctx->stop_wq);
 }
 
-void spufs_dma_callback(struct spu *spu, int type)
-{
-	struct spu_context *ctx = spu->ctx;
-
-	if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) {
-		ctx->event_return |= type;
-		wake_up_all(&ctx->stop_wq);
-	} else {
-		switch (type) {
-		case SPE_EVENT_DMA_ALIGNMENT:
-		case SPE_EVENT_SPE_DATA_STORAGE:
-		case SPE_EVENT_INVALID_DMA:
-			force_sig(SIGBUS, /* info, */ current);
-			break;
-		case SPE_EVENT_SPE_ERROR:
-			force_sig(SIGILL, /* info */ current);
-			break;
-		}
-	}
-}
-
 static inline int spu_stopped(struct spu_context *ctx, u32 * stat)
 {
 	struct spu *spu;
@@ -294,11 +273,8 @@ int spu_process_callback(struct spu_context *ctx)
 static inline int spu_process_events(struct spu_context *ctx)
 {
 	struct spu *spu = ctx->spu;
-	u64 pte_fault = MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED;
 	int ret = 0;
 
-	if (spu->dsisr & pte_fault)
-		ret = spu_irq_class_1_bottom(spu);
 	if (spu->class_0_pending)
 		ret = spu_irq_class_0_bottom(spu);
 	if (!ret && signal_pending(current))
@@ -332,6 +308,10 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
 				break;
 			status &= ~SPU_STATUS_STOPPED_BY_STOP;
 		}
+		ret = spufs_handle_class1(ctx);
+		if (ret)
+			break;
+
 		if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
 			ret = spu_reacquire_runnable(ctx, npc, &status);
 			if (ret) {
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index cae2ad435b0..9993c9b3cff 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -141,6 +141,7 @@ struct spu_context_ops {
 			struct spu_dma_info * info);
 	void (*proxydma_info_read) (struct spu_context * ctx,
 			struct spu_proxydma_info * info);
+	void (*restart_dma)(struct spu_context *ctx);
 };
 
 extern struct spu_context_ops spu_hw_ops;
@@ -172,6 +173,9 @@ int put_spu_gang(struct spu_gang *gang);
 void spu_gang_remove_ctx(struct spu_gang *gang, struct spu_context *ctx);
 void spu_gang_add_ctx(struct spu_gang *gang, struct spu_context *ctx);
 
+/* fault handling */
+int spufs_handle_class1(struct spu_context *ctx);
+
 /* context management */
 static inline void spu_acquire(struct spu_context *ctx)
 {
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index fd91c73de34..8347c4a3f89 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -2084,6 +2084,10 @@ int spu_save(struct spu_state *prev, struct spu *spu)
 	int rc;
 
 	acquire_spu_lock(spu);	/* Step 1. */
+	prev->dar = spu->dar;
+	prev->dsisr = spu->dsisr;
+	spu->dar = 0;
+	spu->dsisr = 0;
 	rc = __do_spu_save(prev, spu);	/* Steps 2-53. */
 	release_spu_lock(spu);
 	if (rc != 0 && rc != 2 && rc != 6) {
@@ -2109,9 +2113,9 @@ int spu_restore(struct spu_state *new, struct spu *spu)
 
 	acquire_spu_lock(spu);
 	harvest(NULL, spu);
-	spu->dar = 0;
-	spu->dsisr = 0;
 	spu->slb_replace = 0;
+	new->dar = 0;
+	new->dsisr = 0;
 	spu->class_0_pending = 0;
 	rc = __do_spu_restore(new, spu);
 	release_spu_lock(spu);