author      Christoph Hellwig <hch@lst.de>  2007-02-13 15:54:21 -0500
committer   Arnd Bergmann <arnd@klappe.arndb.de>  2007-02-13 15:55:40 -0500
commit      6a0641e51011def4e308fd07387047f5ee50647f (patch)
tree        de784f4ec06cabadc80a5689b57bdfb475979473 /arch/powerpc
parent      650f8b0291ecd0abdeadbd0ff3d70c3538e55405 (diff)
[POWERPC] spufs: state_mutex cleanup
Various cleanups in code surrounding the state semaphore:

 - inline spu_acquire/spu_release
 - cleanup spu_acquire_* and add kerneldoc comments to these functions
 - remove spu_release_exclusive and replace it with spu_release

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Diffstat (limited to 'arch/powerpc')
-rw-r--r--   arch/powerpc/platforms/cell/spufs/context.c   97
-rw-r--r--   arch/powerpc/platforms/cell/spufs/run.c         2
-rw-r--r--   arch/powerpc/platforms/cell/spufs/spufs.h      18
3 files changed, 64 insertions, 53 deletions
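
The practical effect of this cleanup is the locking contract spelled out in the new kerneldoc below: spu_acquire_runnable() and spu_acquire_exclusive() return 0 with the state mutex held, or a negative error with the mutex already dropped, so callers pair a successful acquire with exactly one spu_release(). The following is only a rough, self-contained userspace sketch of that contract, not kernel code: a pthread mutex stands in for state_mutex, and the struct fields, state handling, and ownership check are simplified stand-ins for the real spufs definitions.

/* Simplified userspace model of the acquire/release contract described
 * above. Names mirror the spufs helpers but everything here is a stub. */
#include <pthread.h>
#include <stdio.h>
#include <errno.h>

enum spu_state { SPU_STATE_RUNNABLE, SPU_STATE_SAVED };

struct spu_context {
	pthread_mutex_t state_mutex;
	enum spu_state state;
	int owner;                      /* 0 once the context is being torn down */
};

static void spu_acquire(struct spu_context *ctx)
{
	pthread_mutex_lock(&ctx->state_mutex);
}

static void spu_release(struct spu_context *ctx)
{
	pthread_mutex_unlock(&ctx->state_mutex);
}

/* Returns 0 with the mutex held, or a negative error with it dropped.
 * (The real function only checks ctx->owner in the SAVED case and calls
 * spu_activate(); this model collapses that into one step.) */
static int spu_acquire_runnable(struct spu_context *ctx)
{
	spu_acquire(ctx);
	if (!ctx->owner) {
		spu_release(ctx);       /* failure path: drop the lock ourselves */
		return -EINVAL;
	}
	ctx->state = SPU_STATE_RUNNABLE;
	return 0;
}

int main(void)
{
	struct spu_context ctx = {
		.state_mutex = PTHREAD_MUTEX_INITIALIZER,
		.state = SPU_STATE_SAVED,
		.owner = 1,
	};

	/* Caller pattern: release only on the success path. */
	if (spu_acquire_runnable(&ctx) == 0) {
		printf("context runnable, doing work under the lock\n");
		spu_release(&ctx);
	} else {
		printf("acquire failed, lock already dropped\n");
	}
	return 0;
}

Dropping the lock inside the acquire helpers on failure is what lets run.c below simply call spu_release() on its unlock path without distinguishing which acquire variant was used.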
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index c9aab9b1cd8..f2630dc0db8 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -96,16 +96,6 @@ void spu_forget(struct spu_context *ctx)
 	spu_release(ctx);
 }
 
-void spu_acquire(struct spu_context *ctx)
-{
-	mutex_lock(&ctx->state_mutex);
-}
-
-void spu_release(struct spu_context *ctx)
-{
-	mutex_unlock(&ctx->state_mutex);
-}
-
 void spu_unmap_mappings(struct spu_context *ctx)
 {
 	if (ctx->local_store)
@@ -124,66 +114,85 @@ void spu_unmap_mappings(struct spu_context *ctx)
 		unmap_mapping_range(ctx->psmap, 0, 0x20000, 1);
 }
 
+/**
+ * spu_acquire_exclusive - lock spu context and protect against userspace access
+ * @ctx:	spu context to lock
+ *
+ * Note:
+ *	Returns 0 and with the context locked on success
+ *	Returns negative error and with the context _unlocked_ on failure.
+ */
 int spu_acquire_exclusive(struct spu_context *ctx)
 {
-	int ret = 0;
+	int ret = -EINVAL;
 
-	mutex_lock(&ctx->state_mutex);
-	/* ctx is about to be freed, can't acquire any more */
-	if (!ctx->owner) {
-		ret = -EINVAL;
-		goto out;
-	}
+	spu_acquire(ctx);
+	/*
+	 * Context is about to be freed, so we can't acquire it anymore.
+	 */
+	if (!ctx->owner)
+		goto out_unlock;
 
 	if (ctx->state == SPU_STATE_SAVED) {
 		ret = spu_activate(ctx, 0);
 		if (ret)
-			goto out;
+			goto out_unlock;
 	} else {
-		/* We need to exclude userspace access to the context. */
+		/*
+		 * We need to exclude userspace access to the context.
+		 *
+		 * To protect against memory access we invalidate all ptes
+		 * and make sure the pagefault handlers block on the mutex.
+		 */
 		spu_unmap_mappings(ctx);
 	}
 
-out:
-	if (ret)
-		mutex_unlock(&ctx->state_mutex);
+	return 0;
+
+out_unlock:
+	spu_release(ctx);
 	return ret;
 }
 
+/**
+ * spu_acquire_runnable - lock spu context and make sure it is in runnable state
+ * @ctx:	spu context to lock
+ *
+ * Note:
+ *	Returns 0 and with the context locked on success
+ *	Returns negative error and with the context _unlocked_ on failure.
+ */
 int spu_acquire_runnable(struct spu_context *ctx)
 {
-	int ret = 0;
-
-	mutex_lock(&ctx->state_mutex);
-	if (ctx->state == SPU_STATE_RUNNABLE) {
-		ctx->spu->prio = current->prio;
-		return 0;
-	}
-
-	/* ctx is about to be freed, can't acquire any more */
-	if (!ctx->owner) {
-		ret = -EINVAL;
-		goto out;
-	}
+	int ret = -EINVAL;
 
+	spu_acquire(ctx);
 	if (ctx->state == SPU_STATE_SAVED) {
+		/*
+		 * Context is about to be freed, so we can't acquire it anymore.
+		 */
+		if (!ctx->owner)
+			goto out_unlock;
 		ret = spu_activate(ctx, 0);
 		if (ret)
-			goto out;
-	}
+			goto out_unlock;
+	} else
+		ctx->spu->prio = current->prio;
 
-	/* On success, we return holding the lock */
-	return ret;
-out:
-	/* Release here, to simplify calling code. */
-	mutex_unlock(&ctx->state_mutex);
+	return 0;
 
+out_unlock:
+	spu_release(ctx);
 	return ret;
 }
 
+/**
+ * spu_acquire_saved - lock spu context and make sure it is in saved state
+ * @ctx:	spu context to lock
+ */
 void spu_acquire_saved(struct spu_context *ctx)
 {
-	mutex_lock(&ctx->state_mutex);
-	if (ctx->state == SPU_STATE_RUNNABLE)
+	spu_acquire(ctx);
+	if (ctx->state != SPU_STATE_SAVED)
 		spu_deactivate(ctx);
 }
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index 51b78da2f0d..e1647311044 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -133,7 +133,7 @@ out_drop_priv:
 	spu_mfc_sr1_set(ctx->spu, sr1);
 
 out_unlock:
-	spu_release_exclusive(ctx);
+	spu_release(ctx);
 out:
 	return ret;
 }
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index de2401afb22..fa07ec2e2c1 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -158,6 +158,16 @@ void spu_gang_remove_ctx(struct spu_gang *gang, struct spu_context *ctx);
 void spu_gang_add_ctx(struct spu_gang *gang, struct spu_context *ctx);
 
 /* context management */
+static inline void spu_acquire(struct spu_context *ctx)
+{
+	mutex_lock(&ctx->state_mutex);
+}
+
+static inline void spu_release(struct spu_context *ctx)
+{
+	mutex_unlock(&ctx->state_mutex);
+}
+
 struct spu_context * alloc_spu_context(struct spu_gang *gang);
 void destroy_spu_context(struct kref *kref);
 struct spu_context * get_spu_context(struct spu_context *ctx);
@@ -165,17 +175,9 @@ int put_spu_context(struct spu_context *ctx);
 void spu_unmap_mappings(struct spu_context *ctx);
 
 void spu_forget(struct spu_context *ctx);
-void spu_acquire(struct spu_context *ctx);
-void spu_release(struct spu_context *ctx);
 int spu_acquire_runnable(struct spu_context *ctx);
 void spu_acquire_saved(struct spu_context *ctx);
 int spu_acquire_exclusive(struct spu_context *ctx);
-
-static inline void spu_release_exclusive(struct spu_context *ctx)
-{
-	mutex_unlock(&ctx->state_mutex);
-}
-
 int spu_activate(struct spu_context *ctx, u64 flags);
 void spu_deactivate(struct spu_context *ctx);
 void spu_yield(struct spu_context *ctx);