diff options
author | Christoph Hellwig <hch@lst.de> | 2007-02-13 15:36:50 -0500 |
---|---|---|
committer | Arnd Bergmann <arnd@klappe.arndb.de> | 2007-02-13 15:52:37 -0500 |
commit | 650f8b0291ecd0abdeadbd0ff3d70c3538e55405 (patch) | |
tree | 3d3df208380ac7b2fafdd03b5fbcc01d2dedd934 /arch/powerpc/platforms/cell/spufs/context.c | |
parent | 202557d29eae528f464652e92085f3b19b05a0a7 (diff) |
[POWERPC] spufs: simplify state_mutex
The r/w semaphore to lock the spus was overkill and can be replaced
with a mutex to make it faster, simpler and easier to debug. It also
helps to allow making most of spufs interruptible in future patches.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Diffstat (limited to 'arch/powerpc/platforms/cell/spufs/context.c')
-rw-r--r-- | arch/powerpc/platforms/cell/spufs/context.c | 33 |
1 file changed, 10 insertions, 23 deletions
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c index ccffc449763b..c9aab9b1cd8a 100644 --- a/arch/powerpc/platforms/cell/spufs/context.c +++ b/arch/powerpc/platforms/cell/spufs/context.c | |||
@@ -42,7 +42,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang) | |||
42 | } | 42 | } |
43 | spin_lock_init(&ctx->mmio_lock); | 43 | spin_lock_init(&ctx->mmio_lock); |
44 | kref_init(&ctx->kref); | 44 | kref_init(&ctx->kref); |
45 | init_rwsem(&ctx->state_sema); | 45 | mutex_init(&ctx->state_mutex); |
46 | init_MUTEX(&ctx->run_sema); | 46 | init_MUTEX(&ctx->run_sema); |
47 | init_waitqueue_head(&ctx->ibox_wq); | 47 | init_waitqueue_head(&ctx->ibox_wq); |
48 | init_waitqueue_head(&ctx->wbox_wq); | 48 | init_waitqueue_head(&ctx->wbox_wq); |
@@ -65,9 +65,9 @@ void destroy_spu_context(struct kref *kref) | |||
65 | { | 65 | { |
66 | struct spu_context *ctx; | 66 | struct spu_context *ctx; |
67 | ctx = container_of(kref, struct spu_context, kref); | 67 | ctx = container_of(kref, struct spu_context, kref); |
68 | down_write(&ctx->state_sema); | 68 | mutex_lock(&ctx->state_mutex); |
69 | spu_deactivate(ctx); | 69 | spu_deactivate(ctx); |
70 | up_write(&ctx->state_sema); | 70 | mutex_unlock(&ctx->state_mutex); |
71 | spu_fini_csa(&ctx->csa); | 71 | spu_fini_csa(&ctx->csa); |
72 | if (ctx->gang) | 72 | if (ctx->gang) |
73 | spu_gang_remove_ctx(ctx->gang, ctx); | 73 | spu_gang_remove_ctx(ctx->gang, ctx); |
@@ -98,12 +98,12 @@ void spu_forget(struct spu_context *ctx) | |||
98 | 98 | ||
99 | void spu_acquire(struct spu_context *ctx) | 99 | void spu_acquire(struct spu_context *ctx) |
100 | { | 100 | { |
101 | down_read(&ctx->state_sema); | 101 | mutex_lock(&ctx->state_mutex); |
102 | } | 102 | } |
103 | 103 | ||
104 | void spu_release(struct spu_context *ctx) | 104 | void spu_release(struct spu_context *ctx) |
105 | { | 105 | { |
106 | up_read(&ctx->state_sema); | 106 | mutex_unlock(&ctx->state_mutex); |
107 | } | 107 | } |
108 | 108 | ||
109 | void spu_unmap_mappings(struct spu_context *ctx) | 109 | void spu_unmap_mappings(struct spu_context *ctx) |
@@ -128,7 +128,7 @@ int spu_acquire_exclusive(struct spu_context *ctx) | |||
128 | { | 128 | { |
129 | int ret = 0; | 129 | int ret = 0; |
130 | 130 | ||
131 | down_write(&ctx->state_sema); | 131 | mutex_lock(&ctx->state_mutex); |
132 | /* ctx is about to be freed, can't acquire any more */ | 132 | /* ctx is about to be freed, can't acquire any more */ |
133 | if (!ctx->owner) { | 133 | if (!ctx->owner) { |
134 | ret = -EINVAL; | 134 | ret = -EINVAL; |
@@ -146,7 +146,7 @@ int spu_acquire_exclusive(struct spu_context *ctx) | |||
146 | 146 | ||
147 | out: | 147 | out: |
148 | if (ret) | 148 | if (ret) |
149 | up_write(&ctx->state_sema); | 149 | mutex_unlock(&ctx->state_mutex); |
150 | return ret; | 150 | return ret; |
151 | } | 151 | } |
152 | 152 | ||
@@ -154,14 +154,12 @@ int spu_acquire_runnable(struct spu_context *ctx) | |||
154 | { | 154 | { |
155 | int ret = 0; | 155 | int ret = 0; |
156 | 156 | ||
157 | down_read(&ctx->state_sema); | 157 | mutex_lock(&ctx->state_mutex); |
158 | if (ctx->state == SPU_STATE_RUNNABLE) { | 158 | if (ctx->state == SPU_STATE_RUNNABLE) { |
159 | ctx->spu->prio = current->prio; | 159 | ctx->spu->prio = current->prio; |
160 | return 0; | 160 | return 0; |
161 | } | 161 | } |
162 | up_read(&ctx->state_sema); | ||
163 | 162 | ||
164 | down_write(&ctx->state_sema); | ||
165 | /* ctx is about to be freed, can't acquire any more */ | 163 | /* ctx is about to be freed, can't acquire any more */ |
166 | if (!ctx->owner) { | 164 | if (!ctx->owner) { |
167 | ret = -EINVAL; | 165 | ret = -EINVAL; |
@@ -174,29 +172,18 @@ int spu_acquire_runnable(struct spu_context *ctx) | |||
174 | goto out; | 172 | goto out; |
175 | } | 173 | } |
176 | 174 | ||
177 | downgrade_write(&ctx->state_sema); | ||
178 | /* On success, we return holding the lock */ | 175 | /* On success, we return holding the lock */ |
179 | |||
180 | return ret; | 176 | return ret; |
181 | out: | 177 | out: |
182 | /* Release here, to simplify calling code. */ | 178 | /* Release here, to simplify calling code. */ |
183 | up_write(&ctx->state_sema); | 179 | mutex_unlock(&ctx->state_mutex); |
184 | 180 | ||
185 | return ret; | 181 | return ret; |
186 | } | 182 | } |
187 | 183 | ||
188 | void spu_acquire_saved(struct spu_context *ctx) | 184 | void spu_acquire_saved(struct spu_context *ctx) |
189 | { | 185 | { |
190 | down_read(&ctx->state_sema); | 186 | mutex_lock(&ctx->state_mutex); |
191 | |||
192 | if (ctx->state == SPU_STATE_SAVED) | ||
193 | return; | ||
194 | |||
195 | up_read(&ctx->state_sema); | ||
196 | down_write(&ctx->state_sema); | ||
197 | |||
198 | if (ctx->state == SPU_STATE_RUNNABLE) | 187 | if (ctx->state == SPU_STATE_RUNNABLE) |
199 | spu_deactivate(ctx); | 188 | spu_deactivate(ctx); |
200 | |||
201 | downgrade_write(&ctx->state_sema); | ||
202 | } | 189 | } |