diff options
author | Jeremy Kerr <jeremy@au1.ibm.com> | 2006-10-24 12:31:19 -0400 |
---|---|---|
committer | Paul Mackerras <paulus@samba.org> | 2006-10-25 00:20:21 -0400 |
commit | 099814bb1f9bd9081d7c85867f8eb8c049abc1b9 (patch) | |
tree | d8a7a0a064bb922b90bf0a5c03f9864783c629df /arch | |
parent | 0afacde3df4c9980f505d9afd7cb0058389732ca (diff) |
[POWERPC] spufs: Add isolated-mode SPE recycling support
When in isolated mode, SPEs have access to an area of persistent
storage, which is per-SPE. In order for isolated-mode apps to
communicate arbitrary data through this storage, we need to ensure that
isolated physical SPEs can be reused for subsequent applications.
Add a file ("recycle") in an SPE thread dir to enable isolated-mode
recycling. By writing to this file, the kernel will reload the
isolated-mode loader kernel, allowing a new app to be run on the same
physical SPE.
This requires the spu_acquire_exclusive function to enforce exclusive
access to the SPE while the loader is initialised.
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/powerpc/platforms/cell/spufs/context.c | 27 | ||||
-rw-r--r-- | arch/powerpc/platforms/cell/spufs/file.c | 32 | ||||
-rw-r--r-- | arch/powerpc/platforms/cell/spufs/inode.c | 23 | ||||
-rw-r--r-- | arch/powerpc/platforms/cell/spufs/spufs.h | 7 |
4 files changed, 81 insertions, 8 deletions
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c index 034cf6af53a2..48eb050bcf4b 100644 --- a/arch/powerpc/platforms/cell/spufs/context.c +++ b/arch/powerpc/platforms/cell/spufs/context.c | |||
@@ -120,6 +120,33 @@ void spu_unmap_mappings(struct spu_context *ctx) | |||
120 | unmap_mapping_range(ctx->signal2, 0, 0x4000, 1); | 120 | unmap_mapping_range(ctx->signal2, 0, 0x4000, 1); |
121 | } | 121 | } |
122 | 122 | ||
123 | int spu_acquire_exclusive(struct spu_context *ctx) | ||
124 | { | ||
125 | int ret = 0; | ||
126 | |||
127 | down_write(&ctx->state_sema); | ||
128 | /* ctx is about to be freed, can't acquire any more */ | ||
129 | if (!ctx->owner) { | ||
130 | ret = -EINVAL; | ||
131 | goto out; | ||
132 | } | ||
133 | |||
134 | if (ctx->state == SPU_STATE_SAVED) { | ||
135 | ret = spu_activate(ctx, 0); | ||
136 | if (ret) | ||
137 | goto out; | ||
138 | ctx->state = SPU_STATE_RUNNABLE; | ||
139 | } else { | ||
140 | /* We need to exclude userspace access to the context. */ | ||
141 | spu_unmap_mappings(ctx); | ||
142 | } | ||
143 | |||
144 | out: | ||
145 | if (ret) | ||
146 | up_write(&ctx->state_sema); | ||
147 | return ret; | ||
148 | } | ||
149 | |||
123 | int spu_acquire_runnable(struct spu_context *ctx) | 150 | int spu_acquire_runnable(struct spu_context *ctx) |
124 | { | 151 | { |
125 | int ret = 0; | 152 | int ret = 0; |
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c index 8ca330671ad1..5b8ba6c3aa3c 100644 --- a/arch/powerpc/platforms/cell/spufs/file.c +++ b/arch/powerpc/platforms/cell/spufs/file.c | |||
@@ -1343,6 +1343,37 @@ static struct file_operations spufs_mfc_fops = { | |||
1343 | .mmap = spufs_mfc_mmap, | 1343 | .mmap = spufs_mfc_mmap, |
1344 | }; | 1344 | }; |
1345 | 1345 | ||
1346 | |||
1347 | static int spufs_recycle_open(struct inode *inode, struct file *file) | ||
1348 | { | ||
1349 | file->private_data = SPUFS_I(inode)->i_ctx; | ||
1350 | return nonseekable_open(inode, file); | ||
1351 | } | ||
1352 | |||
1353 | static ssize_t spufs_recycle_write(struct file *file, | ||
1354 | const char __user *buffer, size_t size, loff_t *pos) | ||
1355 | { | ||
1356 | struct spu_context *ctx = file->private_data; | ||
1357 | int ret; | ||
1358 | |||
1359 | if (!(ctx->flags & SPU_CREATE_ISOLATE)) | ||
1360 | return -EINVAL; | ||
1361 | |||
1362 | if (size < 1) | ||
1363 | return -EINVAL; | ||
1364 | |||
1365 | ret = spu_recycle_isolated(ctx); | ||
1366 | |||
1367 | if (ret) | ||
1368 | return ret; | ||
1369 | return size; | ||
1370 | } | ||
1371 | |||
1372 | static struct file_operations spufs_recycle_fops = { | ||
1373 | .open = spufs_recycle_open, | ||
1374 | .write = spufs_recycle_write, | ||
1375 | }; | ||
1376 | |||
1346 | static void spufs_npc_set(void *data, u64 val) | 1377 | static void spufs_npc_set(void *data, u64 val) |
1347 | { | 1378 | { |
1348 | struct spu_context *ctx = data; | 1379 | struct spu_context *ctx = data; |
@@ -1551,5 +1582,6 @@ struct tree_descr spufs_dir_nosched_contents[] = { | |||
1551 | { "psmap", &spufs_psmap_fops, 0666, }, | 1582 | { "psmap", &spufs_psmap_fops, 0666, }, |
1552 | { "phys-id", &spufs_id_ops, 0666, }, | 1583 | { "phys-id", &spufs_id_ops, 0666, }, |
1553 | { "object-id", &spufs_object_id_ops, 0666, }, | 1584 | { "object-id", &spufs_object_id_ops, 0666, }, |
1585 | { "recycle", &spufs_recycle_fops, 0222, }, | ||
1554 | {}, | 1586 | {}, |
1555 | }; | 1587 | }; |
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index c8751936672a..9e457be140ef 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c | |||
@@ -248,7 +248,7 @@ static int spu_setup_isolated(struct spu_context *ctx) | |||
248 | if (!isolated_loader) | 248 | if (!isolated_loader) |
249 | return -ENODEV; | 249 | return -ENODEV; |
250 | 250 | ||
251 | if ((ret = spu_acquire_runnable(ctx)) != 0) | 251 | if ((ret = spu_acquire_exclusive(ctx)) != 0) |
252 | return ret; | 252 | return ret; |
253 | 253 | ||
254 | mfc_cntl = &ctx->spu->priv2->mfc_control_RW; | 254 | mfc_cntl = &ctx->spu->priv2->mfc_control_RW; |
@@ -314,10 +314,16 @@ out_drop_priv: | |||
314 | spu_mfc_sr1_set(ctx->spu, sr1); | 314 | spu_mfc_sr1_set(ctx->spu, sr1); |
315 | 315 | ||
316 | out_unlock: | 316 | out_unlock: |
317 | up_write(&ctx->state_sema); | 317 | spu_release_exclusive(ctx); |
318 | return ret; | 318 | return ret; |
319 | } | 319 | } |
320 | 320 | ||
321 | int spu_recycle_isolated(struct spu_context *ctx) | ||
322 | { | ||
323 | ctx->ops->runcntl_stop(ctx); | ||
324 | return spu_setup_isolated(ctx); | ||
325 | } | ||
326 | |||
321 | static int | 327 | static int |
322 | spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags, | 328 | spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags, |
323 | int mode) | 329 | int mode) |
@@ -341,12 +347,6 @@ spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags, | |||
341 | goto out_iput; | 347 | goto out_iput; |
342 | 348 | ||
343 | ctx->flags = flags; | 349 | ctx->flags = flags; |
344 | if (flags & SPU_CREATE_ISOLATE) { | ||
345 | ret = spu_setup_isolated(ctx); | ||
346 | if (ret) | ||
347 | goto out_iput; | ||
348 | } | ||
349 | |||
350 | inode->i_op = &spufs_dir_inode_operations; | 350 | inode->i_op = &spufs_dir_inode_operations; |
351 | inode->i_fop = &simple_dir_operations; | 351 | inode->i_fop = &simple_dir_operations; |
352 | if (flags & SPU_CREATE_NOSCHED) | 352 | if (flags & SPU_CREATE_NOSCHED) |
@@ -432,6 +432,13 @@ static int spufs_create_context(struct inode *inode, | |||
432 | out_unlock: | 432 | out_unlock: |
433 | mutex_unlock(&inode->i_mutex); | 433 | mutex_unlock(&inode->i_mutex); |
434 | out: | 434 | out: |
435 | if (ret >= 0 && (flags & SPU_CREATE_ISOLATE)) { | ||
436 | int setup_err = spu_setup_isolated( | ||
437 | SPUFS_I(dentry->d_inode)->i_ctx); | ||
438 | if (setup_err) | ||
439 | ret = setup_err; | ||
440 | } | ||
441 | |||
435 | dput(dentry); | 442 | dput(dentry); |
436 | return ret; | 443 | return ret; |
437 | } | 444 | } |
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h index b17b809ecd77..f438f0b8525d 100644 --- a/arch/powerpc/platforms/cell/spufs/spufs.h +++ b/arch/powerpc/platforms/cell/spufs/spufs.h | |||
@@ -163,6 +163,12 @@ void spu_acquire(struct spu_context *ctx); | |||
163 | void spu_release(struct spu_context *ctx); | 163 | void spu_release(struct spu_context *ctx); |
164 | int spu_acquire_runnable(struct spu_context *ctx); | 164 | int spu_acquire_runnable(struct spu_context *ctx); |
165 | void spu_acquire_saved(struct spu_context *ctx); | 165 | void spu_acquire_saved(struct spu_context *ctx); |
166 | int spu_acquire_exclusive(struct spu_context *ctx); | ||
167 | |||
168 | static inline void spu_release_exclusive(struct spu_context *ctx) | ||
169 | { | ||
170 | up_write(&ctx->state_sema); | ||
171 | } | ||
166 | 172 | ||
167 | int spu_activate(struct spu_context *ctx, u64 flags); | 173 | int spu_activate(struct spu_context *ctx, u64 flags); |
168 | void spu_deactivate(struct spu_context *ctx); | 174 | void spu_deactivate(struct spu_context *ctx); |
@@ -170,6 +176,7 @@ void spu_yield(struct spu_context *ctx); | |||
170 | int __init spu_sched_init(void); | 176 | int __init spu_sched_init(void); |
171 | void __exit spu_sched_exit(void); | 177 | void __exit spu_sched_exit(void); |
172 | 178 | ||
179 | int spu_recycle_isolated(struct spu_context *ctx); | ||
173 | /* | 180 | /* |
174 | * spufs_wait | 181 | * spufs_wait |
175 | * Same as wait_event_interruptible(), except that here | 182 | * Same as wait_event_interruptible(), except that here |