diff options
author | Christoph Hellwig <hch@lst.de> | 2007-02-13 15:36:50 -0500 |
---|---|---|
committer | Arnd Bergmann <arnd@klappe.arndb.de> | 2007-02-13 15:52:37 -0500 |
commit | 650f8b0291ecd0abdeadbd0ff3d70c3538e55405 (patch) | |
tree | 3d3df208380ac7b2fafdd03b5fbcc01d2dedd934 /arch | |
parent | 202557d29eae528f464652e92085f3b19b05a0a7 (diff) |
[POWERPC] spufs: simplify state_mutex
The r/w semaphore to lock the spus was overkill and can be replaced
with a mutex to make it faster, simpler and easier to debug. It also
helps to allow making most spufs operations interruptible in future patches.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/powerpc/platforms/cell/spufs/context.c | 33 | ||||
-rw-r--r-- | arch/powerpc/platforms/cell/spufs/sched.c | 8 | ||||
-rw-r--r-- | arch/powerpc/platforms/cell/spufs/spufs.h | 6 |
3 files changed, 17 insertions, 30 deletions
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c index ccffc449763b..c9aab9b1cd8a 100644 --- a/arch/powerpc/platforms/cell/spufs/context.c +++ b/arch/powerpc/platforms/cell/spufs/context.c | |||
@@ -42,7 +42,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang) | |||
42 | } | 42 | } |
43 | spin_lock_init(&ctx->mmio_lock); | 43 | spin_lock_init(&ctx->mmio_lock); |
44 | kref_init(&ctx->kref); | 44 | kref_init(&ctx->kref); |
45 | init_rwsem(&ctx->state_sema); | 45 | mutex_init(&ctx->state_mutex); |
46 | init_MUTEX(&ctx->run_sema); | 46 | init_MUTEX(&ctx->run_sema); |
47 | init_waitqueue_head(&ctx->ibox_wq); | 47 | init_waitqueue_head(&ctx->ibox_wq); |
48 | init_waitqueue_head(&ctx->wbox_wq); | 48 | init_waitqueue_head(&ctx->wbox_wq); |
@@ -65,9 +65,9 @@ void destroy_spu_context(struct kref *kref) | |||
65 | { | 65 | { |
66 | struct spu_context *ctx; | 66 | struct spu_context *ctx; |
67 | ctx = container_of(kref, struct spu_context, kref); | 67 | ctx = container_of(kref, struct spu_context, kref); |
68 | down_write(&ctx->state_sema); | 68 | mutex_lock(&ctx->state_mutex); |
69 | spu_deactivate(ctx); | 69 | spu_deactivate(ctx); |
70 | up_write(&ctx->state_sema); | 70 | mutex_unlock(&ctx->state_mutex); |
71 | spu_fini_csa(&ctx->csa); | 71 | spu_fini_csa(&ctx->csa); |
72 | if (ctx->gang) | 72 | if (ctx->gang) |
73 | spu_gang_remove_ctx(ctx->gang, ctx); | 73 | spu_gang_remove_ctx(ctx->gang, ctx); |
@@ -98,12 +98,12 @@ void spu_forget(struct spu_context *ctx) | |||
98 | 98 | ||
99 | void spu_acquire(struct spu_context *ctx) | 99 | void spu_acquire(struct spu_context *ctx) |
100 | { | 100 | { |
101 | down_read(&ctx->state_sema); | 101 | mutex_lock(&ctx->state_mutex); |
102 | } | 102 | } |
103 | 103 | ||
104 | void spu_release(struct spu_context *ctx) | 104 | void spu_release(struct spu_context *ctx) |
105 | { | 105 | { |
106 | up_read(&ctx->state_sema); | 106 | mutex_unlock(&ctx->state_mutex); |
107 | } | 107 | } |
108 | 108 | ||
109 | void spu_unmap_mappings(struct spu_context *ctx) | 109 | void spu_unmap_mappings(struct spu_context *ctx) |
@@ -128,7 +128,7 @@ int spu_acquire_exclusive(struct spu_context *ctx) | |||
128 | { | 128 | { |
129 | int ret = 0; | 129 | int ret = 0; |
130 | 130 | ||
131 | down_write(&ctx->state_sema); | 131 | mutex_lock(&ctx->state_mutex); |
132 | /* ctx is about to be freed, can't acquire any more */ | 132 | /* ctx is about to be freed, can't acquire any more */ |
133 | if (!ctx->owner) { | 133 | if (!ctx->owner) { |
134 | ret = -EINVAL; | 134 | ret = -EINVAL; |
@@ -146,7 +146,7 @@ int spu_acquire_exclusive(struct spu_context *ctx) | |||
146 | 146 | ||
147 | out: | 147 | out: |
148 | if (ret) | 148 | if (ret) |
149 | up_write(&ctx->state_sema); | 149 | mutex_unlock(&ctx->state_mutex); |
150 | return ret; | 150 | return ret; |
151 | } | 151 | } |
152 | 152 | ||
@@ -154,14 +154,12 @@ int spu_acquire_runnable(struct spu_context *ctx) | |||
154 | { | 154 | { |
155 | int ret = 0; | 155 | int ret = 0; |
156 | 156 | ||
157 | down_read(&ctx->state_sema); | 157 | mutex_lock(&ctx->state_mutex); |
158 | if (ctx->state == SPU_STATE_RUNNABLE) { | 158 | if (ctx->state == SPU_STATE_RUNNABLE) { |
159 | ctx->spu->prio = current->prio; | 159 | ctx->spu->prio = current->prio; |
160 | return 0; | 160 | return 0; |
161 | } | 161 | } |
162 | up_read(&ctx->state_sema); | ||
163 | 162 | ||
164 | down_write(&ctx->state_sema); | ||
165 | /* ctx is about to be freed, can't acquire any more */ | 163 | /* ctx is about to be freed, can't acquire any more */ |
166 | if (!ctx->owner) { | 164 | if (!ctx->owner) { |
167 | ret = -EINVAL; | 165 | ret = -EINVAL; |
@@ -174,29 +172,18 @@ int spu_acquire_runnable(struct spu_context *ctx) | |||
174 | goto out; | 172 | goto out; |
175 | } | 173 | } |
176 | 174 | ||
177 | downgrade_write(&ctx->state_sema); | ||
178 | /* On success, we return holding the lock */ | 175 | /* On success, we return holding the lock */ |
179 | |||
180 | return ret; | 176 | return ret; |
181 | out: | 177 | out: |
182 | /* Release here, to simplify calling code. */ | 178 | /* Release here, to simplify calling code. */ |
183 | up_write(&ctx->state_sema); | 179 | mutex_unlock(&ctx->state_mutex); |
184 | 180 | ||
185 | return ret; | 181 | return ret; |
186 | } | 182 | } |
187 | 183 | ||
188 | void spu_acquire_saved(struct spu_context *ctx) | 184 | void spu_acquire_saved(struct spu_context *ctx) |
189 | { | 185 | { |
190 | down_read(&ctx->state_sema); | 186 | mutex_lock(&ctx->state_mutex); |
191 | |||
192 | if (ctx->state == SPU_STATE_SAVED) | ||
193 | return; | ||
194 | |||
195 | up_read(&ctx->state_sema); | ||
196 | down_write(&ctx->state_sema); | ||
197 | |||
198 | if (ctx->state == SPU_STATE_RUNNABLE) | 187 | if (ctx->state == SPU_STATE_RUNNABLE) |
199 | spu_deactivate(ctx); | 188 | spu_deactivate(ctx); |
200 | |||
201 | downgrade_write(&ctx->state_sema); | ||
202 | } | 189 | } |
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index 1d330f67f5ae..c61a34b14083 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c | |||
@@ -233,11 +233,11 @@ static void spu_prio_wait(struct spu_context *ctx, u64 flags) | |||
233 | spu_add_wq(wq, &wait, prio); | 233 | spu_add_wq(wq, &wait, prio); |
234 | 234 | ||
235 | if (!signal_pending(current)) { | 235 | if (!signal_pending(current)) { |
236 | up_write(&ctx->state_sema); | 236 | mutex_unlock(&ctx->state_mutex); |
237 | pr_debug("%s: pid=%d prio=%d\n", __FUNCTION__, | 237 | pr_debug("%s: pid=%d prio=%d\n", __FUNCTION__, |
238 | current->pid, current->prio); | 238 | current->pid, current->prio); |
239 | schedule(); | 239 | schedule(); |
240 | down_write(&ctx->state_sema); | 240 | mutex_lock(&ctx->state_mutex); |
241 | } | 241 | } |
242 | 242 | ||
243 | spu_del_wq(wq, &wait, prio); | 243 | spu_del_wq(wq, &wait, prio); |
@@ -334,7 +334,7 @@ void spu_yield(struct spu_context *ctx) | |||
334 | struct spu *spu; | 334 | struct spu *spu; |
335 | int need_yield = 0; | 335 | int need_yield = 0; |
336 | 336 | ||
337 | if (down_write_trylock(&ctx->state_sema)) { | 337 | if (mutex_trylock(&ctx->state_mutex)) { |
338 | if ((spu = ctx->spu) != NULL) { | 338 | if ((spu = ctx->spu) != NULL) { |
339 | int best = sched_find_first_bit(spu_prio->bitmap); | 339 | int best = sched_find_first_bit(spu_prio->bitmap); |
340 | if (best < MAX_PRIO) { | 340 | if (best < MAX_PRIO) { |
@@ -346,7 +346,7 @@ void spu_yield(struct spu_context *ctx) | |||
346 | spu->prio = MAX_PRIO; | 346 | spu->prio = MAX_PRIO; |
347 | } | 347 | } |
348 | } | 348 | } |
349 | up_write(&ctx->state_sema); | 349 | mutex_unlock(&ctx->state_mutex); |
350 | } | 350 | } |
351 | if (unlikely(need_yield)) | 351 | if (unlikely(need_yield)) |
352 | yield(); | 352 | yield(); |
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h index 9b44abe921cc..de2401afb226 100644 --- a/arch/powerpc/platforms/cell/spufs/spufs.h +++ b/arch/powerpc/platforms/cell/spufs/spufs.h | |||
@@ -23,7 +23,7 @@ | |||
23 | #define SPUFS_H | 23 | #define SPUFS_H |
24 | 24 | ||
25 | #include <linux/kref.h> | 25 | #include <linux/kref.h> |
26 | #include <linux/rwsem.h> | 26 | #include <linux/mutex.h> |
27 | #include <linux/spinlock.h> | 27 | #include <linux/spinlock.h> |
28 | #include <linux/fs.h> | 28 | #include <linux/fs.h> |
29 | 29 | ||
@@ -53,7 +53,7 @@ struct spu_context { | |||
53 | u64 object_id; /* user space pointer for oprofile */ | 53 | u64 object_id; /* user space pointer for oprofile */ |
54 | 54 | ||
55 | enum { SPU_STATE_RUNNABLE, SPU_STATE_SAVED } state; | 55 | enum { SPU_STATE_RUNNABLE, SPU_STATE_SAVED } state; |
56 | struct rw_semaphore state_sema; | 56 | struct mutex state_mutex; |
57 | struct semaphore run_sema; | 57 | struct semaphore run_sema; |
58 | 58 | ||
59 | struct mm_struct *owner; | 59 | struct mm_struct *owner; |
@@ -173,7 +173,7 @@ int spu_acquire_exclusive(struct spu_context *ctx); | |||
173 | 173 | ||
174 | static inline void spu_release_exclusive(struct spu_context *ctx) | 174 | static inline void spu_release_exclusive(struct spu_context *ctx) |
175 | { | 175 | { |
176 | up_write(&ctx->state_sema); | 176 | mutex_unlock(&ctx->state_mutex); |
177 | } | 177 | } |
178 | 178 | ||
179 | int spu_activate(struct spu_context *ctx, u64 flags); | 179 | int spu_activate(struct spu_context *ctx, u64 flags); |