author		Christoph Hellwig <hch@lst.de>	2007-04-23 15:08:12 -0400
committer	Arnd Bergmann <arnd@klappe.arndb.de>	2007-04-23 15:18:54 -0400
commit		7ec18ab923a2e377ecb05c74a2d38f457f79950f (patch)
tree		b722d8063bfc4b1b44ba67083649efab0c2e5a64
parent		a475c2f43520cb095452201da57395000cfeb94c (diff)
[POWERPC] spufs: streamline locking for isolated spu setup
For quite a while now spu state is protected by a simple mutex instead of
the old rw_semaphore, and this means we can simplify the locking around
spu_setup_isolated a lot.

Instead of doing an spu_release before entering spu_setup_isolated and then
calling the complicated spu_acquire_exclusive, we can now simply enter the
function locked and in a guaranteed runnable state, so that the only bit of
spu_acquire_exclusive that's left is the call to spu_unmap_mappings.

Similarly there's no more need to unlock and reacquire the state_mutex when
spu_setup_isolated is done; we can always return with the lock held and only
drop it in spu_run_init in the failure case.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
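To make the new calling convention concrete, below is a minimal userspace
sketch of the pattern this patch moves to: the setup step is entered with the
context lock already held and returns with it still held, and only the
caller's failure path drops the lock. This is not spufs code; pthread mutexes
stand in for the kernel's state_mutex, and all toy_* names are hypothetical.

/*
 * Toy illustration of "enter locked, return locked, caller unlocks on
 * failure". Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

struct toy_ctx {
	pthread_mutex_t state_mutex;	/* stand-in for ctx->state_mutex */
	int loader_present;		/* stand-in for isolated_loader */
};

/* Called with ctx->state_mutex held; returns with it still held. */
static int toy_setup_isolated(struct toy_ctx *ctx)
{
	if (!ctx->loader_present)
		return -1;	/* caller handles the unlock on failure */
	/* ... device setup would happen here, still under the lock ... */
	return 0;
}

static int toy_run_init(struct toy_ctx *ctx)
{
	int ret;

	pthread_mutex_lock(&ctx->state_mutex);
	ret = toy_setup_isolated(ctx);
	if (ret) {
		/* failure: drop the lock here, mirroring spu_run_init */
		pthread_mutex_unlock(&ctx->state_mutex);
		return ret;
	}
	/* success: carry on with the lock held, release when finished */
	pthread_mutex_unlock(&ctx->state_mutex);
	return 0;
}

int main(void)
{
	struct toy_ctx ctx = {
		.state_mutex = PTHREAD_MUTEX_INITIALIZER,
		.loader_present = 1,
	};

	printf("setup result: %d\n", toy_run_init(&ctx));
	return 0;
}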
-rw-r--r--	arch/powerpc/platforms/cell/spufs/context.c	40
-rw-r--r--	arch/powerpc/platforms/cell/spufs/run.c		30
-rw-r--r--	arch/powerpc/platforms/cell/spufs/spufs.h	1
3 files changed, 16 insertions, 55 deletions
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 065147fb1cc2..ce17a2847184 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -122,46 +122,6 @@ void spu_unmap_mappings(struct spu_context *ctx)
 }
 
 /**
- * spu_acquire_exclusive - lock spu contex and protect against userspace access
- * @ctx: spu contex to lock
- *
- * Note:
- *	Returns 0 and with the context locked on success
- *	Returns negative error and with the context _unlocked_ on failure.
- */
-int spu_acquire_exclusive(struct spu_context *ctx)
-{
-	int ret = -EINVAL;
-
-	spu_acquire(ctx);
-	/*
-	 * Context is about to be freed, so we can't acquire it anymore.
-	 */
-	if (!ctx->owner)
-		goto out_unlock;
-
-	if (ctx->state == SPU_STATE_SAVED) {
-		ret = spu_activate(ctx, 0);
-		if (ret)
-			goto out_unlock;
-	} else {
-		/*
-		 * We need to exclude userspace access to the context.
-		 *
-		 * To protect against memory access we invalidate all ptes
-		 * and make sure the pagefault handlers block on the mutex.
-		 */
-		spu_unmap_mappings(ctx);
-	}
-
-	return 0;
-
-out_unlock:
-	spu_release(ctx);
-	return ret;
-}
-
-/**
  * spu_acquire_runnable - lock spu contex and make sure it is in runnable state
  * @ctx: spu contex to lock
  *
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index f95a611ca362..7df5202c9a90 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -63,13 +63,18 @@ static int spu_setup_isolated(struct spu_context *ctx)
 	const u32 status_loading = SPU_STATUS_RUNNING
 		| SPU_STATUS_ISOLATED_STATE | SPU_STATUS_ISOLATED_LOAD_STATUS;
 
+	ret = -ENODEV;
 	if (!isolated_loader)
-		return -ENODEV;
-
-	ret = spu_acquire_exclusive(ctx);
-	if (ret)
 		goto out;
 
+	/*
+	 * We need to exclude userspace access to the context.
+	 *
+	 * To protect against memory access we invalidate all ptes
+	 * and make sure the pagefault handlers block on the mutex.
+	 */
+	spu_unmap_mappings(ctx);
+
 	mfc_cntl = &ctx->spu->priv2->mfc_control_RW;
 
 	/* purge the MFC DMA queue to ensure no spurious accesses before we
@@ -82,7 +87,7 @@ static int spu_setup_isolated(struct spu_context *ctx)
 			printk(KERN_ERR "%s: timeout flushing MFC DMA queue\n",
 					__FUNCTION__);
 			ret = -EIO;
-			goto out_unlock;
+			goto out;
 		}
 		cond_resched();
 	}
@@ -119,12 +124,15 @@ static int spu_setup_isolated(struct spu_context *ctx)
 		pr_debug("%s: isolated LOAD failed\n", __FUNCTION__);
 		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
 		ret = -EACCES;
+		goto out_drop_priv;
+	}
 
-	} else if (!(status & SPU_STATUS_ISOLATED_STATE)) {
+	if (!(status & SPU_STATUS_ISOLATED_STATE)) {
 		/* This isn't allowed by the CBEA, but check anyway */
 		pr_debug("%s: SPU fell out of isolated mode?\n", __FUNCTION__);
 		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP);
 		ret = -EINVAL;
+		goto out_drop_priv;
 	}
 
 out_drop_priv:
@@ -132,8 +140,6 @@ out_drop_priv:
 	sr1 |= MFC_STATE1_PROBLEM_STATE_MASK;
 	spu_mfc_sr1_set(ctx->spu, sr1);
 
-out_unlock:
-	spu_release(ctx);
 out:
 	return ret;
 }
@@ -149,13 +155,9 @@ static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
 
 	if (ctx->flags & SPU_CREATE_ISOLATE) {
 		if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
-			/* Need to release ctx, because spu_setup_isolated will
-			 * acquire it exclusively.
-			 */
-			spu_release(ctx);
 			ret = spu_setup_isolated(ctx);
-			if (!ret)
-				ret = spu_acquire_runnable(ctx, 0);
+			if (ret)
+				spu_release(ctx);
 		}
 
 		/* if userspace has set the runcntrl register (eg, to issue an
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 0fb366d9d257..cae2ad435b0a 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -192,7 +192,6 @@ void spu_unmap_mappings(struct spu_context *ctx);
 void spu_forget(struct spu_context *ctx);
 int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags);
 void spu_acquire_saved(struct spu_context *ctx);
-int spu_acquire_exclusive(struct spu_context *ctx);
 
 int spu_activate(struct spu_context *ctx, unsigned long flags);
 void spu_deactivate(struct spu_context *ctx);