author     Arnd Bergmann <arnd@arndb.de>        2006-11-20 12:45:08 -0500
committer  Paul Mackerras <paulus@samba.org>    2006-12-04 04:40:02 -0500
commit     ee2d7340cbf3b123e1c3b7454f3e2b7e65d33bb2
tree       2953689e0efdb35195e46c75148bfb91030f71fa
parent     3692dc66149dc17cd82ec785a06478322c0eddff
[POWERPC] spufs: Use SPU master control to prevent wild SPU execution
When the user changes the runcontrol register, an SPU might be
running without a process being attached to it and waiting for
events. In order to prevent this, make sure we always disable
the priv1 master control when we're not inside of spu_run.
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
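
Background on the mechanism: the MFC_STATE1_MASTER_RUN_CONTROL_MASK bit in the privileged SR1 register acts as an outer gate on SPU execution, independent of the problem-state run-control register that user space can write. With this patch, spu_run sets the master bit on entry (master_start) and clears it on exit (master_stop), so a runcntl write issued outside of spu_run cannot start a "wild" SPU. Below is a minimal stand-alone C sketch of that gating, not kernel code: the toy_spu struct, the register values and the spu_is_running() helper are illustrative assumptions; only the bit name and the start/stop pairing mirror the diff that follows.

/* Stand-alone sketch of the master run-control gating introduced by this
 * patch.  Types and helpers are simplified stand-ins, not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define MFC_STATE1_MASTER_RUN_CONTROL_MASK 0x08ull  /* illustrative value */
#define SPU_RUNCNTL_RUNNABLE               1u       /* illustrative value */

struct toy_spu {
	uint64_t mfc_sr1;   /* models the privileged MFC SR1 register */
	uint32_t runcntl;   /* models the problem-state run-control register */
};

/* spu_run sets the master bit before letting the context run ... */
static void master_start(struct toy_spu *spu)
{
	spu->mfc_sr1 |= MFC_STATE1_MASTER_RUN_CONTROL_MASK;
}

/* ... and clears it again on the way out, so later runcntl writes from
 * user space cannot start the SPU behind the kernel's back. */
static void master_stop(struct toy_spu *spu)
{
	spu->mfc_sr1 &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
}

/* The SPU only executes when both the user-visible run control and the
 * privileged master bit are set. */
static int spu_is_running(const struct toy_spu *spu)
{
	return (spu->runcntl & SPU_RUNCNTL_RUNNABLE) &&
	       (spu->mfc_sr1 & MFC_STATE1_MASTER_RUN_CONTROL_MASK);
}

int main(void)
{
	struct toy_spu spu = { 0, 0 };

	spu.runcntl = SPU_RUNCNTL_RUNNABLE;     /* user writes runcntl */
	printf("outside spu_run: running=%d\n", spu_is_running(&spu)); /* 0 */

	master_start(&spu);                     /* enter spu_run */
	printf("inside spu_run:  running=%d\n", spu_is_running(&spu)); /* 1 */

	master_stop(&spu);                      /* leave spu_run */
	printf("after spu_run:   running=%d\n", spu_is_running(&spu)); /* 0 */
	return 0;
}

In the patch itself the same pairing is implemented twice: once against the saved register image (backing_ops.c) and once against the live hardware via spu_mfc_sr1_get()/spu_mfc_sr1_set() (hw_ops.c).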
-rw-r--r--  arch/powerpc/platforms/cell/spufs/backing_ops.c | 24
-rw-r--r--  arch/powerpc/platforms/cell/spufs/context.c     | 42
-rw-r--r--  arch/powerpc/platforms/cell/spufs/hw_ops.c      | 28
-rw-r--r--  arch/powerpc/platforms/cell/spufs/inode.c       | 15
-rw-r--r--  arch/powerpc/platforms/cell/spufs/run.c         |  3
-rw-r--r--  arch/powerpc/platforms/cell/spufs/spufs.h       |  3
6 files changed, 79 insertions(+), 36 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spufs/backing_ops.c b/arch/powerpc/platforms/cell/spufs/backing_ops.c
index 21b28f61b00f..4a8e998c6be6 100644
--- a/arch/powerpc/platforms/cell/spufs/backing_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/backing_ops.c
@@ -280,9 +280,26 @@ static void spu_backing_runcntl_write(struct spu_context *ctx, u32 val)
 	spin_unlock(&ctx->csa.register_lock);
 }
 
-static void spu_backing_runcntl_stop(struct spu_context *ctx)
+static void spu_backing_master_start(struct spu_context *ctx)
 {
-	spu_backing_runcntl_write(ctx, SPU_RUNCNTL_STOP);
+	struct spu_state *csa = &ctx->csa;
+	u64 sr1;
+
+	spin_lock(&csa->register_lock);
+	sr1 = csa->priv1.mfc_sr1_RW | MFC_STATE1_MASTER_RUN_CONTROL_MASK;
+	csa->priv1.mfc_sr1_RW = sr1;
+	spin_unlock(&csa->register_lock);
+}
+
+static void spu_backing_master_stop(struct spu_context *ctx)
+{
+	struct spu_state *csa = &ctx->csa;
+	u64 sr1;
+
+	spin_lock(&csa->register_lock);
+	sr1 = csa->priv1.mfc_sr1_RW & ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
+	csa->priv1.mfc_sr1_RW = sr1;
+	spin_unlock(&csa->register_lock);
 }
 
 static int spu_backing_set_mfc_query(struct spu_context * ctx, u32 mask,
@@ -347,7 +364,8 @@ struct spu_context_ops spu_backing_ops = {
 	.status_read = spu_backing_status_read,
 	.get_ls = spu_backing_get_ls,
 	.runcntl_write = spu_backing_runcntl_write,
-	.runcntl_stop = spu_backing_runcntl_stop,
+	.master_start = spu_backing_master_start,
+	.master_stop = spu_backing_master_stop,
 	.set_mfc_query = spu_backing_set_mfc_query,
 	.read_mfc_tagstatus = spu_backing_read_mfc_tagstatus,
 	.get_mfc_free_elements = spu_backing_get_mfc_free_elements,
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 48eb050bcf4b..0870009f56db 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -122,29 +122,29 @@ void spu_unmap_mappings(struct spu_context *ctx)
 
 int spu_acquire_exclusive(struct spu_context *ctx)
 {
 	int ret = 0;
 
 	down_write(&ctx->state_sema);
 	/* ctx is about to be freed, can't acquire any more */
 	if (!ctx->owner) {
 		ret = -EINVAL;
 		goto out;
 	}
 
 	if (ctx->state == SPU_STATE_SAVED) {
 		ret = spu_activate(ctx, 0);
 		if (ret)
 			goto out;
 		ctx->state = SPU_STATE_RUNNABLE;
 	} else {
 		/* We need to exclude userspace access to the context. */
 		spu_unmap_mappings(ctx);
 	}
 
 out:
 	if (ret)
 		up_write(&ctx->state_sema);
 	return ret;
 }
 
 int spu_acquire_runnable(struct spu_context *ctx)
diff --git a/arch/powerpc/platforms/cell/spufs/hw_ops.c b/arch/powerpc/platforms/cell/spufs/hw_ops.c
index 79c304e815a1..69fc342e063d 100644
--- a/arch/powerpc/platforms/cell/spufs/hw_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/hw_ops.c
@@ -216,13 +216,26 @@ static void spu_hw_runcntl_write(struct spu_context *ctx, u32 val)
 	spin_unlock_irq(&ctx->spu->register_lock);
 }
 
-static void spu_hw_runcntl_stop(struct spu_context *ctx)
+static void spu_hw_master_start(struct spu_context *ctx)
 {
-	spin_lock_irq(&ctx->spu->register_lock);
-	out_be32(&ctx->spu->problem->spu_runcntl_RW, SPU_RUNCNTL_STOP);
-	while (in_be32(&ctx->spu->problem->spu_status_R) & SPU_STATUS_RUNNING)
-		cpu_relax();
-	spin_unlock_irq(&ctx->spu->register_lock);
+	struct spu *spu = ctx->spu;
+	u64 sr1;
+
+	spin_lock_irq(&spu->register_lock);
+	sr1 = spu_mfc_sr1_get(spu) | MFC_STATE1_MASTER_RUN_CONTROL_MASK;
+	spu_mfc_sr1_set(spu, sr1);
+	spin_unlock_irq(&spu->register_lock);
+}
+
+static void spu_hw_master_stop(struct spu_context *ctx)
+{
+	struct spu *spu = ctx->spu;
+	u64 sr1;
+
+	spin_lock_irq(&spu->register_lock);
+	sr1 = spu_mfc_sr1_get(spu) & ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
+	spu_mfc_sr1_set(spu, sr1);
+	spin_unlock_irq(&spu->register_lock);
 }
 
 static int spu_hw_set_mfc_query(struct spu_context * ctx, u32 mask, u32 mode)
@@ -295,7 +308,8 @@ struct spu_context_ops spu_hw_ops = {
 	.status_read = spu_hw_status_read,
 	.get_ls = spu_hw_get_ls,
 	.runcntl_write = spu_hw_runcntl_write,
-	.runcntl_stop = spu_hw_runcntl_stop,
+	.master_start = spu_hw_master_start,
+	.master_stop = spu_hw_master_stop,
 	.set_mfc_query = spu_hw_set_mfc_query,
 	.read_mfc_tagstatus = spu_hw_read_mfc_tagstatus,
 	.get_mfc_free_elements = spu_hw_get_mfc_free_elements,
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 9e457be140ef..1fbcc5369243 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -248,8 +248,13 @@ static int spu_setup_isolated(struct spu_context *ctx)
 	if (!isolated_loader)
 		return -ENODEV;
 
-	if ((ret = spu_acquire_exclusive(ctx)) != 0)
-		return ret;
+	/* prevent concurrent operation with spu_run */
+	down(&ctx->run_sema);
+	ctx->ops->master_start(ctx);
+
+	ret = spu_acquire_exclusive(ctx);
+	if (ret)
+		goto out;
 
 	mfc_cntl = &ctx->spu->priv2->mfc_control_RW;
 
@@ -315,12 +320,14 @@ out_drop_priv:
 
 out_unlock:
 	spu_release_exclusive(ctx);
+out:
+	ctx->ops->master_stop(ctx);
+	up(&ctx->run_sema);
 	return ret;
 }
 
 int spu_recycle_isolated(struct spu_context *ctx)
 {
-	ctx->ops->runcntl_stop(ctx);
 	return spu_setup_isolated(ctx);
 }
 
@@ -435,6 +442,8 @@ out:
 	if (ret >= 0 && (flags & SPU_CREATE_ISOLATE)) {
 		int setup_err = spu_setup_isolated(
 				SPUFS_I(dentry->d_inode)->i_ctx);
+		/* FIXME: clean up context again on failure to avoid
+		   leak. */
 		if (setup_err)
 			ret = setup_err;
 	}
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index c88fd7f9ea74..212b9c2f04ab 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -207,6 +207,7 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
 	if (down_interruptible(&ctx->run_sema))
 		return -ERESTARTSYS;
 
+	ctx->ops->master_start(ctx);
 	ctx->event_return = 0;
 	ret = spu_run_init(ctx, npc);
 	if (ret)
@@ -234,7 +235,7 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
 	} while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
 				     SPU_STATUS_STOPPED_BY_HALT)));
 
-	ctx->ops->runcntl_stop(ctx);
+	ctx->ops->master_stop(ctx);
 	ret = spu_run_fini(ctx, npc, &status);
 	spu_yield(ctx);
 
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 3e7cfc246147..135fbb53d8e1 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -116,7 +116,8 @@ struct spu_context_ops {
 	u32(*status_read) (struct spu_context * ctx);
 	char*(*get_ls) (struct spu_context * ctx);
 	void (*runcntl_write) (struct spu_context * ctx, u32 data);
-	void (*runcntl_stop) (struct spu_context * ctx);
+	void (*master_start) (struct spu_context * ctx);
+	void (*master_stop) (struct spu_context * ctx);
 	int (*set_mfc_query)(struct spu_context * ctx, u32 mask, u32 mode);
 	u32 (*read_mfc_tagstatus)(struct spu_context * ctx);
 	u32 (*get_mfc_free_elements)(struct spu_context *ctx);