author	Jeremy Kerr <jk@ozlabs.org>	2006-11-20 12:45:10 -0500
committer	Paul Mackerras <paulus@samba.org>	2006-12-04 04:40:06 -0500
commit	c6730ed4c280ff9e55766796523c94a7d111da09 (patch)
tree	6635d2a52f7e8021e4565edd0b66a752d4d699ad /arch
parent	3960c260204bc33404a6e54e9dcd44f1f83bc701 (diff)
[POWERPC] spufs: Load isolation kernel from spu_run
In order to fit with the "don't-run-spus-outside-of-spu_run" model, this
patch starts the isolated-mode loader in spu_run, rather than spu_create.
If spu_run is passed an isolated-mode context that isn't in the isolated
state, it will run the loader.

This fixes potential races with the isolated SPE app doing a
stop-and-signal before the PPE has called spu_run: bugzilla #29111.

Also (in conjunction with a mambo patch), this addresses #28565, as we
always set the runcntrl register when entering spu_run.

It is up to libspe to ensure that isolated-mode apps are cleaned up after
running to completion - i.e., put the app through the "ISOLATE EXIT"
state (see Ch 11 of the CBEA).

Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
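For illustration, the userspace sequence this model implies looks roughly
like the sketch below. spu_create(2) and spu_run(2) are the real powerpc
syscalls, but glibc provides no wrappers for them, so the sketch goes
through syscall(2); the flag values, the /spu/iso0 pathname (assuming
spufs mounted at /spu), and the run_isolated() helper are illustrative
assumptions, not part of this patch.

/* Hedged sketch of a libspe-style caller, powerpc only; flag values
 * are assumed from include/asm-powerpc/spu.h of this era. */
#include <sys/syscall.h>
#include <unistd.h>

#define SPU_CREATE_NOSCHED	0x0004	/* assumed value */
#define SPU_CREATE_ISOLATE	0x0008	/* assumed value */

static int run_isolated(void)
{
	unsigned int npc = 0;
	int fd, status;

	/* With this patch, spu_create() only builds the context; the
	 * isolated-mode loader is no longer started here. */
	fd = syscall(__NR_spu_create, "/spu/iso0",
		     SPU_CREATE_NOSCHED | SPU_CREATE_ISOLATE, 0700);
	if (fd < 0)
		return -1;

	/* The first spu_run() sees a context that isn't yet in the
	 * isolated state and runs the loader before starting the app,
	 * so the SPE can't stop-and-signal before the PPE gets here. */
	status = syscall(__NR_spu_run, fd, &npc, (unsigned int *)0);

	/* Driving the app through "ISOLATE EXIT" on completion remains
	 * the caller's (libspe's) responsibility. */
	close(fd);
	return status;
}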
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/platforms/cell/spufs/file.c	|  32
-rw-r--r--	arch/powerpc/platforms/cell/spufs/inode.c	| 109
-rw-r--r--	arch/powerpc/platforms/cell/spufs/run.c	| 117
-rw-r--r--	arch/powerpc/platforms/cell/spufs/spufs.h	|   3
4 files changed, 113 insertions(+), 148 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index e6667530332b..50e0afc46ad2 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -1358,37 +1358,6 @@ static struct file_operations spufs_mfc_fops = {
 	.mmap = spufs_mfc_mmap,
 };
 
-
-static int spufs_recycle_open(struct inode *inode, struct file *file)
-{
-	file->private_data = SPUFS_I(inode)->i_ctx;
-	return nonseekable_open(inode, file);
-}
-
-static ssize_t spufs_recycle_write(struct file *file,
-		const char __user *buffer, size_t size, loff_t *pos)
-{
-	struct spu_context *ctx = file->private_data;
-	int ret;
-
-	if (!(ctx->flags & SPU_CREATE_ISOLATE))
-		return -EINVAL;
-
-	if (size < 1)
-		return -EINVAL;
-
-	ret = spu_recycle_isolated(ctx);
-
-	if (ret)
-		return ret;
-	return size;
-}
-
-static struct file_operations spufs_recycle_fops = {
-	.open = spufs_recycle_open,
-	.write = spufs_recycle_write,
-};
-
 static void spufs_npc_set(void *data, u64 val)
 {
 	struct spu_context *ctx = data;
@@ -1789,6 +1758,5 @@ struct tree_descr spufs_dir_nosched_contents[] = {
 	{ "psmap", &spufs_psmap_fops, 0666, },
 	{ "phys-id", &spufs_id_ops, 0666, },
 	{ "object-id", &spufs_object_id_ops, 0666, },
-	{ "recycle", &spufs_recycle_fops, 0222, },
 	{},
 };
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 1fbcc5369243..d5f0a21a19d8 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -34,8 +34,6 @@
 #include <linux/parser.h>
 
 #include <asm/prom.h>
-#include <asm/spu_priv1.h>
-#include <asm/io.h>
 #include <asm/semaphore.h>
 #include <asm/spu.h>
 #include <asm/uaccess.h>
@@ -43,7 +41,7 @@
 #include "spufs.h"
 
 static kmem_cache_t *spufs_inode_cache;
-static char *isolated_loader;
+char *isolated_loader;
 
 static struct inode *
 spufs_alloc_inode(struct super_block *sb)
@@ -235,102 +233,6 @@ struct file_operations spufs_context_fops = {
 	.fsync = simple_sync_file,
 };
 
-static int spu_setup_isolated(struct spu_context *ctx)
-{
-	int ret;
-	u64 __iomem *mfc_cntl;
-	u64 sr1;
-	u32 status;
-	unsigned long timeout;
-	const u32 status_loading = SPU_STATUS_RUNNING
-		| SPU_STATUS_ISOLATED_STATE | SPU_STATUS_ISOLATED_LOAD_STATUS;
-
-	if (!isolated_loader)
-		return -ENODEV;
-
-	/* prevent concurrent operation with spu_run */
-	down(&ctx->run_sema);
-	ctx->ops->master_start(ctx);
-
-	ret = spu_acquire_exclusive(ctx);
-	if (ret)
-		goto out;
-
-	mfc_cntl = &ctx->spu->priv2->mfc_control_RW;
-
-	/* purge the MFC DMA queue to ensure no spurious accesses before we
-	 * enter kernel mode */
-	timeout = jiffies + HZ;
-	out_be64(mfc_cntl, MFC_CNTL_PURGE_DMA_REQUEST);
-	while ((in_be64(mfc_cntl) & MFC_CNTL_PURGE_DMA_STATUS_MASK)
-			!= MFC_CNTL_PURGE_DMA_COMPLETE) {
-		if (time_after(jiffies, timeout)) {
-			printk(KERN_ERR "%s: timeout flushing MFC DMA queue\n",
-					__FUNCTION__);
-			ret = -EIO;
-			goto out_unlock;
-		}
-		cond_resched();
-	}
-
-	/* put the SPE in kernel mode to allow access to the loader */
-	sr1 = spu_mfc_sr1_get(ctx->spu);
-	sr1 &= ~MFC_STATE1_PROBLEM_STATE_MASK;
-	spu_mfc_sr1_set(ctx->spu, sr1);
-
-	/* start the loader */
-	ctx->ops->signal1_write(ctx, (unsigned long)isolated_loader >> 32);
-	ctx->ops->signal2_write(ctx,
-			(unsigned long)isolated_loader & 0xffffffff);
-
-	ctx->ops->runcntl_write(ctx,
-			SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
-
-	ret = 0;
-	timeout = jiffies + HZ;
-	while (((status = ctx->ops->status_read(ctx)) & status_loading) ==
-				status_loading) {
-		if (time_after(jiffies, timeout)) {
-			printk(KERN_ERR "%s: timeout waiting for loader\n",
-					__FUNCTION__);
-			ret = -EIO;
-			goto out_drop_priv;
-		}
-		cond_resched();
-	}
-
-	if (!(status & SPU_STATUS_RUNNING)) {
-		/* If isolated LOAD has failed: run SPU, we will get a stop-and
-		 * signal later. */
-		pr_debug("%s: isolated LOAD failed\n", __FUNCTION__);
-		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
-		ret = -EACCES;
-
-	} else if (!(status & SPU_STATUS_ISOLATED_STATE)) {
-		/* This isn't allowed by the CBEA, but check anyway */
-		pr_debug("%s: SPU fell out of isolated mode?\n", __FUNCTION__);
-		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP);
-		ret = -EINVAL;
-	}
-
-out_drop_priv:
-	/* Finished accessing the loader. Drop kernel mode */
-	sr1 |= MFC_STATE1_PROBLEM_STATE_MASK;
-	spu_mfc_sr1_set(ctx->spu, sr1);
-
-out_unlock:
-	spu_release_exclusive(ctx);
-out:
-	ctx->ops->master_stop(ctx);
-	up(&ctx->run_sema);
-	return ret;
-}
-
-int spu_recycle_isolated(struct spu_context *ctx)
-{
-	return spu_setup_isolated(ctx);
-}
-
 static int
 spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
 		int mode)
@@ -439,15 +341,6 @@ static int spufs_create_context(struct inode *inode,
 out_unlock:
 	mutex_unlock(&inode->i_mutex);
 out:
-	if (ret >= 0 && (flags & SPU_CREATE_ISOLATE)) {
-		int setup_err = spu_setup_isolated(
-				SPUFS_I(dentry->d_inode)->i_ctx);
-		/* FIXME: clean up context again on failure to avoid
-		   leak. */
-		if (setup_err)
-			ret = setup_err;
-	}
-
 	dput(dentry);
 	return ret;
 }
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index 212b9c2f04ab..1be4e3339d8e 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -4,6 +4,8 @@
 #include <linux/ptrace.h>
 
 #include <asm/spu.h>
+#include <asm/spu_priv1.h>
+#include <asm/io.h>
 #include <asm/unistd.h>
 
 #include "spufs.h"
@@ -51,21 +53,122 @@ static inline int spu_stopped(struct spu_context *ctx, u32 * stat)
 	return (!(*stat & 0x1) || pte_fault || spu->class_0_pending) ? 1 : 0;
 }
 
+static int spu_setup_isolated(struct spu_context *ctx)
+{
+	int ret;
+	u64 __iomem *mfc_cntl;
+	u64 sr1;
+	u32 status;
+	unsigned long timeout;
+	const u32 status_loading = SPU_STATUS_RUNNING
+		| SPU_STATUS_ISOLATED_STATE | SPU_STATUS_ISOLATED_LOAD_STATUS;
+
+	if (!isolated_loader)
+		return -ENODEV;
+
+	ret = spu_acquire_exclusive(ctx);
+	if (ret)
+		goto out;
+
+	mfc_cntl = &ctx->spu->priv2->mfc_control_RW;
+
+	/* purge the MFC DMA queue to ensure no spurious accesses before we
+	 * enter kernel mode */
+	timeout = jiffies + HZ;
+	out_be64(mfc_cntl, MFC_CNTL_PURGE_DMA_REQUEST);
+	while ((in_be64(mfc_cntl) & MFC_CNTL_PURGE_DMA_STATUS_MASK)
+			!= MFC_CNTL_PURGE_DMA_COMPLETE) {
+		if (time_after(jiffies, timeout)) {
+			printk(KERN_ERR "%s: timeout flushing MFC DMA queue\n",
+					__FUNCTION__);
+			ret = -EIO;
+			goto out_unlock;
+		}
+		cond_resched();
+	}
+
+	/* put the SPE in kernel mode to allow access to the loader */
+	sr1 = spu_mfc_sr1_get(ctx->spu);
+	sr1 &= ~MFC_STATE1_PROBLEM_STATE_MASK;
+	spu_mfc_sr1_set(ctx->spu, sr1);
+
+	/* start the loader */
+	ctx->ops->signal1_write(ctx, (unsigned long)isolated_loader >> 32);
+	ctx->ops->signal2_write(ctx,
+			(unsigned long)isolated_loader & 0xffffffff);
+
+	ctx->ops->runcntl_write(ctx,
+			SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
+
+	ret = 0;
+	timeout = jiffies + HZ;
+	while (((status = ctx->ops->status_read(ctx)) & status_loading) ==
+				status_loading) {
+		if (time_after(jiffies, timeout)) {
+			printk(KERN_ERR "%s: timeout waiting for loader\n",
+					__FUNCTION__);
+			ret = -EIO;
+			goto out_drop_priv;
+		}
+		cond_resched();
+	}
+
+	if (!(status & SPU_STATUS_RUNNING)) {
+		/* If isolated LOAD has failed: run SPU, we will get a stop-and
+		 * signal later. */
+		pr_debug("%s: isolated LOAD failed\n", __FUNCTION__);
+		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
+		ret = -EACCES;
+
+	} else if (!(status & SPU_STATUS_ISOLATED_STATE)) {
+		/* This isn't allowed by the CBEA, but check anyway */
+		pr_debug("%s: SPU fell out of isolated mode?\n", __FUNCTION__);
+		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP);
+		ret = -EINVAL;
+	}
+
+out_drop_priv:
+	/* Finished accessing the loader. Drop kernel mode */
+	sr1 |= MFC_STATE1_PROBLEM_STATE_MASK;
+	spu_mfc_sr1_set(ctx->spu, sr1);
+
+out_unlock:
+	spu_release_exclusive(ctx);
+out:
+	return ret;
+}
+
 static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
 {
 	int ret;
 	unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
 
-	if ((ret = spu_acquire_runnable(ctx)) != 0)
+	ret = spu_acquire_runnable(ctx);
+	if (ret)
 		return ret;
 
-	/* if we're in isolated mode, we would have started the SPU
-	 * earlier, so don't do it again now. */
-	if (!(ctx->flags & SPU_CREATE_ISOLATE)) {
+	if (ctx->flags & SPU_CREATE_ISOLATE) {
+		if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
+			/* Need to release ctx, because spu_setup_isolated will
+			 * acquire it exclusively.
+			 */
+			spu_release(ctx);
+			ret = spu_setup_isolated(ctx);
+			if (!ret)
+				ret = spu_acquire_runnable(ctx);
+		}
+
+		/* if userspace has set the runcntrl register (eg, to issue an
+		 * isolated exit), we need to re-set it here */
+		runcntl = ctx->ops->runcntl_read(ctx) &
+			(SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
+		if (runcntl == 0)
+			runcntl = SPU_RUNCNTL_RUNNABLE;
+	} else
 		ctx->ops->npc_write(ctx, *npc);
-		ctx->ops->runcntl_write(ctx, runcntl);
-	}
-	return 0;
+
+	ctx->ops->runcntl_write(ctx, runcntl);
+	return ret;
 }
 
 static inline int spu_run_fini(struct spu_context *ctx, u32 * npc,
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index ca56b9b11c1d..23d20f380560 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -183,7 +183,8 @@ void spu_yield(struct spu_context *ctx);
 int __init spu_sched_init(void);
 void __exit spu_sched_exit(void);
 
-int spu_recycle_isolated(struct spu_context *ctx);
+extern char *isolated_loader;
+
 /*
  * spufs_wait
  * Same as wait_event_interruptible(), except that here