about summary refs log tree commit diff stats
path: root/arch/powerpc/platforms/cell/spu_base.c
diff options
context:
space:
mode:
authorarnd@arndb.de <arnd@arndb.de>2006-10-24 12:31:18 -0400
committerPaul Mackerras <paulus@samba.org>2006-10-25 00:20:21 -0400
commit0afacde3df4c9980f505d9afd7cb0058389732ca (patch)
tree0007cbb88b2ed7ba604fd11ca269f24617d2bb61 /arch/powerpc/platforms/cell/spu_base.c
parenteb758ce5b0d84e13cb643b6cc7cb429f6fa28258 (diff)
[POWERPC] spufs: allow isolated mode apps by starting the SPE loader
This patch adds general support for isolated mode SPE apps. Isolated apps are started indirectly, by a dedicated loader "kernel". This patch starts the loader when spe_create is invoked with the ISOLATE flag. We do this at spe_create time to allow libspe to pass the isolated app in before calling spe_run. The loader is read from the device tree, at the location "/spu-isolation/loader". If the loader is not present, an attempt to start an isolated SPE binary will fail with -ENODEV. Update: loader needs to be correctly aligned - copy to a kmalloced buf. Update: remove workaround for systemsim/spurom 'L-bit' bug, which has been fixed. Update: don't write to runcntl on spu_run_init: SPU is already running. Update: do spu_setup_isolated earlier Tested on systemsim. Signed-off-by: Jeremy Kerr <jk@ozlabs.org> Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com> Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/platforms/cell/spu_base.c')
-rw-r--r-- arch/powerpc/platforms/cell/spu_base.c | 35
1 files changed, 24 insertions, 11 deletions
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index f6c94087db40..d78b0af038e6 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -89,7 +89,30 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
89 printk("%s: invalid access during switch!\n", __func__); 89 printk("%s: invalid access during switch!\n", __func__);
90 return 1; 90 return 1;
91 } 91 }
92 if (!mm || (REGION_ID(ea) != USER_REGION_ID)) { 92 esid = (ea & ESID_MASK) | SLB_ESID_V;
93
94 switch(REGION_ID(ea)) {
95 case USER_REGION_ID:
96#ifdef CONFIG_HUGETLB_PAGE
97 if (in_hugepage_area(mm->context, ea))
98 llp = mmu_psize_defs[mmu_huge_psize].sllp;
99 else
100#endif
101 llp = mmu_psize_defs[mmu_virtual_psize].sllp;
102 vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
103 SLB_VSID_USER | llp;
104 break;
105 case VMALLOC_REGION_ID:
106 llp = mmu_psize_defs[mmu_virtual_psize].sllp;
107 vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
108 SLB_VSID_KERNEL | llp;
109 break;
110 case KERNEL_REGION_ID:
111 llp = mmu_psize_defs[mmu_linear_psize].sllp;
112 vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
113 SLB_VSID_KERNEL | llp;
114 break;
115 default:
93 /* Future: support kernel segments so that drivers 116 /* Future: support kernel segments so that drivers
94 * can use SPUs. 117 * can use SPUs.
95 */ 118 */
@@ -97,16 +120,6 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
97 return 1; 120 return 1;
98 } 121 }
99 122
100 esid = (ea & ESID_MASK) | SLB_ESID_V;
101#ifdef CONFIG_HUGETLB_PAGE
102 if (in_hugepage_area(mm->context, ea))
103 llp = mmu_psize_defs[mmu_huge_psize].sllp;
104 else
105#endif
106 llp = mmu_psize_defs[mmu_virtual_psize].sllp;
107 vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
108 SLB_VSID_USER | llp;
109
110 out_be64(&priv2->slb_index_W, spu->slb_replace); 123 out_be64(&priv2->slb_index_W, spu->slb_replace);
111 out_be64(&priv2->slb_vsid_RW, vsid); 124 out_be64(&priv2->slb_vsid_RW, vsid);
112 out_be64(&priv2->slb_esid_RW, esid); 125 out_be64(&priv2->slb_esid_RW, esid);