Diffstat (limited to 'arch/powerpc/platforms/cell/spufs/run.c')
-rw-r--r--	arch/powerpc/platforms/cell/spufs/run.c	123
1 file changed, 58 insertions, 65 deletions
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index f95a611ca362..57626600b1a4 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -18,27 +18,6 @@ void spufs_stop_callback(struct spu *spu)
 	wake_up_all(&ctx->stop_wq);
 }
 
-void spufs_dma_callback(struct spu *spu, int type)
-{
-	struct spu_context *ctx = spu->ctx;
-
-	if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) {
-		ctx->event_return |= type;
-		wake_up_all(&ctx->stop_wq);
-	} else {
-		switch (type) {
-		case SPE_EVENT_DMA_ALIGNMENT:
-		case SPE_EVENT_SPE_DATA_STORAGE:
-		case SPE_EVENT_INVALID_DMA:
-			force_sig(SIGBUS, /* info, */ current);
-			break;
-		case SPE_EVENT_SPE_ERROR:
-			force_sig(SIGILL, /* info */ current);
-			break;
-		}
-	}
-}
-
 static inline int spu_stopped(struct spu_context *ctx, u32 * stat)
 {
 	struct spu *spu;
@@ -63,13 +42,18 @@ static int spu_setup_isolated(struct spu_context *ctx)
 	const u32 status_loading = SPU_STATUS_RUNNING
 		| SPU_STATUS_ISOLATED_STATE | SPU_STATUS_ISOLATED_LOAD_STATUS;
 
+	ret = -ENODEV;
 	if (!isolated_loader)
-		return -ENODEV;
-
-	ret = spu_acquire_exclusive(ctx);
-	if (ret)
 		goto out;
 
+	/*
+	 * We need to exclude userspace access to the context.
+	 *
+	 * To protect against memory access we invalidate all ptes
+	 * and make sure the pagefault handlers block on the mutex.
+	 */
+	spu_unmap_mappings(ctx);
+
 	mfc_cntl = &ctx->spu->priv2->mfc_control_RW;
 
 	/* purge the MFC DMA queue to ensure no spurious accesses before we
@@ -82,7 +66,7 @@ static int spu_setup_isolated(struct spu_context *ctx)
 			printk(KERN_ERR "%s: timeout flushing MFC DMA queue\n",
 					__FUNCTION__);
 			ret = -EIO;
-			goto out_unlock;
+			goto out;
 		}
 		cond_resched();
 	}
@@ -119,12 +103,15 @@ static int spu_setup_isolated(struct spu_context *ctx)
 		pr_debug("%s: isolated LOAD failed\n", __FUNCTION__);
 		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
 		ret = -EACCES;
+		goto out_drop_priv;
+	}
 
-	} else if (!(status & SPU_STATUS_ISOLATED_STATE)) {
+	if (!(status & SPU_STATUS_ISOLATED_STATE)) {
 		/* This isn't allowed by the CBEA, but check anyway */
 		pr_debug("%s: SPU fell out of isolated mode?\n", __FUNCTION__);
 		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP);
 		ret = -EINVAL;
+		goto out_drop_priv;
 	}
 
 out_drop_priv:
@@ -132,30 +119,19 @@ out_drop_priv:
 	sr1 |= MFC_STATE1_PROBLEM_STATE_MASK;
 	spu_mfc_sr1_set(ctx->spu, sr1);
 
-out_unlock:
-	spu_release(ctx);
 out:
 	return ret;
 }
 
-static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
+static int spu_run_init(struct spu_context *ctx, u32 * npc)
 {
-	int ret;
-	unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
-
-	ret = spu_acquire_runnable(ctx, 0);
-	if (ret)
-		return ret;
-
 	if (ctx->flags & SPU_CREATE_ISOLATE) {
+		unsigned long runcntl;
+
 		if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
-			/* Need to release ctx, because spu_setup_isolated will
-			 * acquire it exclusively.
-			 */
-			spu_release(ctx);
-			ret = spu_setup_isolated(ctx);
-			if (!ret)
-				ret = spu_acquire_runnable(ctx, 0);
+			int ret = spu_setup_isolated(ctx);
+			if (ret)
+				return ret;
 		}
 
 		/* if userspace has set the runcntrl register (eg, to issue an
@@ -164,16 +140,17 @@ static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
 			(SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
 		if (runcntl == 0)
 			runcntl = SPU_RUNCNTL_RUNNABLE;
+		ctx->ops->runcntl_write(ctx, runcntl);
 	} else {
 		spu_start_tick(ctx);
 		ctx->ops->npc_write(ctx, *npc);
+		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
 	}
 
-	ctx->ops->runcntl_write(ctx, runcntl);
-	return ret;
+	return 0;
 }
 
-static inline int spu_run_fini(struct spu_context *ctx, u32 * npc,
+static int spu_run_fini(struct spu_context *ctx, u32 * npc,
 			       u32 * status)
 {
 	int ret = 0;
@@ -189,19 +166,27 @@ static inline int spu_run_fini(struct spu_context *ctx, u32 * npc,
 	return ret;
 }
 
-static inline int spu_reacquire_runnable(struct spu_context *ctx, u32 *npc,
+static int spu_reacquire_runnable(struct spu_context *ctx, u32 *npc,
 				  u32 *status)
 {
 	int ret;
 
-	if ((ret = spu_run_fini(ctx, npc, status)) != 0)
+	ret = spu_run_fini(ctx, npc, status);
+	if (ret)
 		return ret;
-	if (*status & (SPU_STATUS_STOPPED_BY_STOP |
-		       SPU_STATUS_STOPPED_BY_HALT)) {
+
+	if (*status & (SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_STOPPED_BY_HALT))
 		return *status;
-	}
-	if ((ret = spu_run_init(ctx, npc)) != 0)
+
+	ret = spu_acquire_runnable(ctx, 0);
+	if (ret)
+		return ret;
+
+	ret = spu_run_init(ctx, npc);
+	if (ret) {
+		spu_release(ctx);
 		return ret;
+	}
 	return 0;
 }
 
@@ -253,17 +238,17 @@ int spu_process_callback(struct spu_context *ctx)
 {
 	struct spu_syscall_block s;
 	u32 ls_pointer, npc;
-	char *ls;
+	void __iomem *ls;
 	long spu_ret;
 	int ret;
 
 	/* get syscall block from local store */
-	npc = ctx->ops->npc_read(ctx);
-	ls = ctx->ops->get_ls(ctx);
-	ls_pointer = *(u32*)(ls + npc);
+	npc = ctx->ops->npc_read(ctx) & ~3;
+	ls = (void __iomem *)ctx->ops->get_ls(ctx);
+	ls_pointer = in_be32(ls + npc);
 	if (ls_pointer > (LS_SIZE - sizeof(s)))
 		return -EFAULT;
-	memcpy(&s, ls + ls_pointer, sizeof (s));
+	memcpy_fromio(&s, ls + ls_pointer, sizeof(s));
 
 	/* do actual syscall without pinning the spu */
 	ret = 0;
@@ -283,7 +268,7 @@ int spu_process_callback(struct spu_context *ctx)
 	}
 
 	/* write result, jump over indirect pointer */
-	memcpy(ls + ls_pointer, &spu_ret, sizeof (spu_ret));
+	memcpy_toio(ls + ls_pointer, &spu_ret, sizeof(spu_ret));
 	ctx->ops->npc_write(ctx, npc);
 	ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
 	return ret;
@@ -292,11 +277,8 @@ int spu_process_callback(struct spu_context *ctx)
 static inline int spu_process_events(struct spu_context *ctx)
 {
 	struct spu *spu = ctx->spu;
-	u64 pte_fault = MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED;
 	int ret = 0;
 
-	if (spu->dsisr & pte_fault)
-		ret = spu_irq_class_1_bottom(spu);
 	if (spu->class_0_pending)
 		ret = spu_irq_class_0_bottom(spu);
 	if (!ret && signal_pending(current))
@@ -310,14 +292,21 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
 	int ret;
 	u32 status;
 
-	if (down_interruptible(&ctx->run_sema))
+	if (mutex_lock_interruptible(&ctx->run_mutex))
 		return -ERESTARTSYS;
 
 	ctx->ops->master_start(ctx);
 	ctx->event_return = 0;
-	ret = spu_run_init(ctx, npc);
+
+	ret = spu_acquire_runnable(ctx, 0);
 	if (ret)
+		return ret;
+
+	ret = spu_run_init(ctx, npc);
+	if (ret) {
+		spu_release(ctx);
 		goto out;
+	}
 
 	do {
 		ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
@@ -330,6 +319,10 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
 				break;
 			status &= ~SPU_STATUS_STOPPED_BY_STOP;
 		}
+		ret = spufs_handle_class1(ctx);
+		if (ret)
+			break;
+
 		if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
 			ret = spu_reacquire_runnable(ctx, npc, &status);
 			if (ret) {
@@ -363,6 +356,6 @@ out2:
 
 out:
 	*event = ctx->event_return;
-	up(&ctx->run_sema);
+	mutex_unlock(&ctx->run_mutex);
 	return ret;
 }