author		arnd@arndb.de <arnd@arndb.de>	2006-10-24 12:31:18 -0400
committer	Paul Mackerras <paulus@samba.org>	2006-10-25 00:20:21 -0400
commit		0afacde3df4c9980f505d9afd7cb0058389732ca (patch)
tree		0007cbb88b2ed7ba604fd11ca269f24617d2bb61 /arch/powerpc
parent		eb758ce5b0d84e13cb643b6cc7cb429f6fa28258 (diff)
[POWERPC] spufs: allow isolated mode apps by starting the SPE loader
This patch adds general support for isolated mode SPE apps.

Isolated apps are started indirectly, by a dedicated loader "kernel". This
patch starts the loader when spe_create is invoked with the ISOLATE flag.
We do this at spe_create time to allow libspe to pass the isolated app in
before calling spe_run.

The loader is read from the device tree, at the location
"/spu-isolation/loader". If the loader is not present, an attempt to start
an isolated SPE binary will fail with -ENODEV.

Update: loader needs to be correctly aligned - copy to a kmalloced buf.
Update: remove workaround for systemsim/spurom 'L-bit' bug, which has been
        fixed.
Update: don't write to runcntl on spu_run_init: SPU is already running.
Update: do spu_setup_isolated earlier

Tested on systemsim.

Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
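For orientation, here is a minimal userspace sketch of the flow described above. This is an illustration, not part of the patch: it assumes the powerpc syscall numbers __NR_spu_create and __NR_spu_run from asm/unistd.h, assumes SPU_CREATE_ISOLATE carries the value defined in asm/spu.h, and uses a hypothetical spufs path /spu/isolated-app.

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#define SPU_CREATE_ISOLATE	0x0008	/* assumption: mirrors asm/spu.h */

int main(void)
{
	unsigned int npc = 0, status = 0;
	long ctx;

	/* With the ISOLATE flag, spu_create() itself starts the SPE loader
	 * read from /spu-isolation/loader, and fails with ENODEV when the
	 * device tree provides no loader. */
	ctx = syscall(__NR_spu_create, "/spu/isolated-app",
			SPU_CREATE_ISOLATE, 0755);
	if (ctx < 0) {
		perror("spu_create");
		return 1;
	}

	/* libspe hands the isolated app to the already-running loader at
	 * this point, then enters the context. */
	syscall(__NR_spu_run, (int)ctx, &npc, &status);
	return 0;
}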
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/platforms/cell/spu_base.c    |  35
-rw-r--r--  arch/powerpc/platforms/cell/spufs/inode.c | 117
-rw-r--r--  arch/powerpc/platforms/cell/spufs/run.c   |  12
3 files changed, 148 insertions(+), 16 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index f6c94087db..d78b0af038 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -89,7 +89,30 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
 		printk("%s: invalid access during switch!\n", __func__);
 		return 1;
 	}
-	if (!mm || (REGION_ID(ea) != USER_REGION_ID)) {
+	esid = (ea & ESID_MASK) | SLB_ESID_V;
+
+	switch(REGION_ID(ea)) {
+	case USER_REGION_ID:
+#ifdef CONFIG_HUGETLB_PAGE
+		if (in_hugepage_area(mm->context, ea))
+			llp = mmu_psize_defs[mmu_huge_psize].sllp;
+		else
+#endif
+			llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+		vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
+				SLB_VSID_USER | llp;
+		break;
+	case VMALLOC_REGION_ID:
+		llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+		vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
+			SLB_VSID_KERNEL | llp;
+		break;
+	case KERNEL_REGION_ID:
+		llp = mmu_psize_defs[mmu_linear_psize].sllp;
+		vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
+			SLB_VSID_KERNEL | llp;
+		break;
+	default:
 		/* Future: support kernel segments so that drivers
 		 * can use SPUs.
 		 */
@@ -97,16 +120,6 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
 		return 1;
 	}
 
-	esid = (ea & ESID_MASK) | SLB_ESID_V;
-#ifdef CONFIG_HUGETLB_PAGE
-	if (in_hugepage_area(mm->context, ea))
-		llp = mmu_psize_defs[mmu_huge_psize].sllp;
-	else
-#endif
-	llp = mmu_psize_defs[mmu_virtual_psize].sllp;
-	vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
-			SLB_VSID_USER | llp;
-
 	out_be64(&priv2->slb_index_W, spu->slb_replace);
 	out_be64(&priv2->slb_vsid_RW, vsid);
 	out_be64(&priv2->slb_esid_RW, esid);
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 787ae71a68..c875193667 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -33,6 +33,8 @@
 #include <linux/slab.h>
 #include <linux/parser.h>
 
+#include <asm/prom.h>
+#include <asm/spu_priv1.h>
 #include <asm/io.h>
 #include <asm/semaphore.h>
 #include <asm/spu.h>
@@ -41,6 +43,7 @@
 #include "spufs.h"
 
 static kmem_cache_t *spufs_inode_cache;
+static char *isolated_loader;
 
 static struct inode *
 spufs_alloc_inode(struct super_block *sb)
@@ -232,6 +235,89 @@ struct file_operations spufs_context_fops = {
 	.fsync	= simple_sync_file,
 };
 
+static int spu_setup_isolated(struct spu_context *ctx)
+{
+	int ret;
+	u64 __iomem *mfc_cntl;
+	u64 sr1;
+	u32 status;
+	unsigned long timeout;
+	const u32 status_loading = SPU_STATUS_RUNNING
+		| SPU_STATUS_ISOLATED_STATE | SPU_STATUS_ISOLATED_LOAD_STATUS;
+
+	if (!isolated_loader)
+		return -ENODEV;
+
+	if ((ret = spu_acquire_runnable(ctx)) != 0)
+		return ret;
+
+	mfc_cntl = &ctx->spu->priv2->mfc_control_RW;
+
+	/* purge the MFC DMA queue to ensure no spurious accesses before we
+	 * enter kernel mode */
+	timeout = jiffies + HZ;
+	out_be64(mfc_cntl, MFC_CNTL_PURGE_DMA_REQUEST);
+	while ((in_be64(mfc_cntl) & MFC_CNTL_PURGE_DMA_STATUS_MASK)
+			!= MFC_CNTL_PURGE_DMA_COMPLETE) {
+		if (time_after(jiffies, timeout)) {
+			printk(KERN_ERR "%s: timeout flushing MFC DMA queue\n",
+					__FUNCTION__);
+			ret = -EIO;
+			goto out_unlock;
+		}
+		cond_resched();
+	}
+
+	/* put the SPE in kernel mode to allow access to the loader */
+	sr1 = spu_mfc_sr1_get(ctx->spu);
+	sr1 &= ~MFC_STATE1_PROBLEM_STATE_MASK;
+	spu_mfc_sr1_set(ctx->spu, sr1);
+
+	/* start the loader */
+	ctx->ops->signal1_write(ctx, (unsigned long)isolated_loader >> 32);
+	ctx->ops->signal2_write(ctx,
+			(unsigned long)isolated_loader & 0xffffffff);
+
+	ctx->ops->runcntl_write(ctx,
+			SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
+
+	ret = 0;
+	timeout = jiffies + HZ;
+	while (((status = ctx->ops->status_read(ctx)) & status_loading) ==
+				status_loading) {
+		if (time_after(jiffies, timeout)) {
+			printk(KERN_ERR "%s: timeout waiting for loader\n",
+					__FUNCTION__);
+			ret = -EIO;
+			goto out_drop_priv;
+		}
+		cond_resched();
+	}
+
+	if (!(status & SPU_STATUS_RUNNING)) {
+		/* If isolated LOAD has failed: run SPU, we will get a stop-and
+		 * signal later. */
+		pr_debug("%s: isolated LOAD failed\n", __FUNCTION__);
+		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
+		ret = -EACCES;
+
+	} else if (!(status & SPU_STATUS_ISOLATED_STATE)) {
+		/* This isn't allowed by the CBEA, but check anyway */
+		pr_debug("%s: SPU fell out of isolated mode?\n", __FUNCTION__);
+		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP);
+		ret = -EINVAL;
+	}
+
+out_drop_priv:
+	/* Finished accessing the loader. Drop kernel mode */
+	sr1 |= MFC_STATE1_PROBLEM_STATE_MASK;
+	spu_mfc_sr1_set(ctx->spu, sr1);
+
+out_unlock:
+	up_write(&ctx->state_sema);
+	return ret;
+}
+
 static int
 spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
 		int mode)
@@ -255,6 +341,11 @@ spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
 		goto out_iput;
 
 	ctx->flags = flags;
+	if (flags & SPU_CREATE_ISOLATE) {
+		ret = spu_setup_isolated(ctx);
+		if (ret)
+			goto out_iput;
+	}
 
 	inode->i_op = &spufs_dir_inode_operations;
 	inode->i_fop = &simple_dir_operations;
@@ -555,6 +646,30 @@ spufs_parse_options(char *options, struct inode *root)
 	return 1;
 }
 
+static void
+spufs_init_isolated_loader(void)
+{
+	struct device_node *dn;
+	const char *loader;
+	int size;
+
+	dn = of_find_node_by_path("/spu-isolation");
+	if (!dn)
+		return;
+
+	loader = get_property(dn, "loader", &size);
+	if (!loader)
+		return;
+
+	/* kmalloc should align on a 16 byte boundary..* */
+	isolated_loader = kmalloc(size, GFP_KERNEL);
+	if (!isolated_loader)
+		return;
+
+	memcpy(isolated_loader, loader, size);
+	printk(KERN_INFO "spufs: SPU isolation mode enabled\n");
+}
+
 static int
 spufs_create_root(struct super_block *sb, void *data)
 {
@@ -640,6 +755,8 @@ static int __init spufs_init(void)
 	ret = register_spu_syscalls(&spufs_calls);
 	if (ret)
 		goto out_fs;
+
+	spufs_init_isolated_loader();
 	return 0;
 out_fs:
 	unregister_filesystem(&spufs_type);
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index 0c03a04b6a..a4a0080c22 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -1,3 +1,5 @@
+#define DEBUG
+
 #include <linux/wait.h>
 #include <linux/ptrace.h>
 
@@ -56,12 +58,12 @@ static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
 	if ((ret = spu_acquire_runnable(ctx)) != 0)
 		return ret;
 
-	if (ctx->flags & SPU_CREATE_ISOLATE)
-		runcntl |= SPU_RUNCNTL_ISOLATE;
-	else
+	/* if we're in isolated mode, we would have started the SPU
+	 * earlier, so don't do it again now. */
+	if (!(ctx->flags & SPU_CREATE_ISOLATE)) {
 		ctx->ops->npc_write(ctx, *npc);
-
-	ctx->ops->runcntl_write(ctx, runcntl);
+		ctx->ops->runcntl_write(ctx, runcntl);
+	}
 	return 0;
 }
 
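A note on the hand-off in spu_setup_isolated() above: the loader's 64-bit kernel address is split across the two 32-bit signal-notification registers (signal1_write gets the upper word, signal2_write the lower). Here is a sketch of the reassembly the loader side would perform, with a hypothetical helper name:

#include <stdint.h>

/* Rebuild the loader address from the two signal notification words
 * written by spu_setup_isolated(). */
static inline uint64_t loader_addr_from_signals(uint32_t sig1, uint32_t sig2)
{
	return ((uint64_t)sig1 << 32) | (uint64_t)sig2;
}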