-rw-r--r--	arch/powerpc/platforms/cell/spufs/sched.c	159
-rw-r--r--	arch/powerpc/platforms/cell/spufs/spufs.h	1
2 files changed, 93 insertions, 67 deletions
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 03b357ce3987..6f8e2257c5a6 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -49,7 +49,8 @@
 #define SPU_BITMAP_SIZE (((MAX_PRIO+BITS_PER_LONG)/BITS_PER_LONG)+1)
 struct spu_prio_array {
 	unsigned long bitmap[SPU_BITMAP_SIZE];
-	wait_queue_head_t waitq[MAX_PRIO];
+	struct list_head runq[MAX_PRIO];
+	spinlock_t runq_lock;
 	struct list_head active_list[MAX_NUMNODES];
 	struct mutex active_mutex[MAX_NUMNODES];
 };
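
The reworked spu_prio_array follows the O(1) scheduler's runqueue shape: one list per priority plus a bitmap in which a set bit marks a non-empty list, so the best runnable priority falls out of a single find-first-bit scan. The +1 in SPU_BITMAP_SIZE leaves room for the sentinel bit at MAX_PRIO that spu_sched_init() sets (see the last hunks below), which guarantees the scan terminates. A minimal user-space sketch of that pattern; every name and size here is illustrative, not the kernel's:

/* Sketch of the bitmap-plus-runlists idea behind spu_prio_array.
 * Plain C for illustration; MAX_PRIO, the helpers and the queue type
 * are stand-ins, not the kernel's definitions. */
#include <stdio.h>

#define MAX_PRIO	8
#define BITS_PER_LONG	(8 * sizeof(long))
/* One extra slot so the sentinel bit at MAX_PRIO always fits. */
#define BITMAP_SIZE	(((MAX_PRIO + BITS_PER_LONG) / BITS_PER_LONG) + 1)

static unsigned long bitmap[BITMAP_SIZE];
static int runq_len[MAX_PRIO];	/* stand-in for struct list_head runq[] */

static void set_bit_(int nr)   { bitmap[nr / BITS_PER_LONG] |=  1UL << (nr % BITS_PER_LONG); }
static void clear_bit_(int nr) { bitmap[nr / BITS_PER_LONG] &= ~(1UL << (nr % BITS_PER_LONG)); }

/* Linear stand-in for sched_find_first_bit(): lowest set bit = best prio. */
static int find_first_bit_(void)
{
	for (int i = 0; i <= MAX_PRIO; i++)
		if (bitmap[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))
			return i;
	return MAX_PRIO;
}

static void enqueue(int prio)
{
	runq_len[prio]++;
	set_bit_(prio);			/* non-empty list -> bit set */
}

static void dequeue(int prio)
{
	if (--runq_len[prio] == 0)
		clear_bit_(prio);	/* last entry gone -> bit cleared */
}

int main(void)
{
	set_bit_(MAX_PRIO);		/* sentinel: the scan always terminates */
	enqueue(5);
	enqueue(2);
	printf("best prio: %d\n", find_first_bit_());	/* 2 */
	dequeue(2);
	printf("best prio: %d\n", find_first_bit_());	/* 5 */
	dequeue(5);
	printf("best prio: %d\n", find_first_bit_());	/* MAX_PRIO == empty */
	return 0;
}
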
@@ -196,61 +197,91 @@ static int spu_unbind_context(struct spu *spu, struct spu_context *ctx)
 	return was_active;
 }
 
-static inline void spu_add_wq(wait_queue_head_t * wq, wait_queue_t * wait,
-		int prio)
+/**
+ * spu_add_to_rq - add a context to the runqueue
+ * @ctx: context to add
+ */
+static void spu_add_to_rq(struct spu_context *ctx)
 {
-	prepare_to_wait_exclusive(wq, wait, TASK_INTERRUPTIBLE);
-	set_bit(prio, spu_prio->bitmap);
+	spin_lock(&spu_prio->runq_lock);
+	list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
+	set_bit(ctx->prio, spu_prio->bitmap);
+	spin_unlock(&spu_prio->runq_lock);
 }
 
-static inline void spu_del_wq(wait_queue_head_t * wq, wait_queue_t * wait,
-		int prio)
+/**
+ * spu_del_from_rq - remove a context from the runqueue
+ * @ctx: context to remove
+ */
+static void spu_del_from_rq(struct spu_context *ctx)
 {
-	u64 flags;
-
-	__set_current_state(TASK_RUNNING);
-
-	spin_lock_irqsave(&wq->lock, flags);
+	spin_lock(&spu_prio->runq_lock);
+	list_del_init(&ctx->rq);
+	if (list_empty(&spu_prio->runq[ctx->prio]))
+		clear_bit(ctx->prio, spu_prio->bitmap);
+	spin_unlock(&spu_prio->runq_lock);
+}
 
-	remove_wait_queue_locked(wq, wait);
-	if (list_empty(&wq->task_list))
-		clear_bit(prio, spu_prio->bitmap);
+/**
+ * spu_grab_context - pick one context off the runqueue
+ * @prio: priority of the context to be picked
+ *
+ * This function returns the first context queued at priority @prio;
+ * if several contexts share that priority, the one queued first is
+ * taken. The woken waiter removes itself via spu_del_from_rq().
+ *
+ * Returns the chosen spu_context, or NULL if the list is empty.
+ *
+ * Must be called with spu_prio->runq_lock held.
+ */
+static struct spu_context *spu_grab_context(int prio)
+{
+	struct list_head *rq = &spu_prio->runq[prio];
 
-	spin_unlock_irqrestore(&wq->lock, flags);
+	if (list_empty(rq))
+		return NULL;
+	return list_entry(rq->next, struct spu_context, rq);
 }
 
-static void spu_prio_wait(struct spu_context *ctx, u64 flags)
+static void spu_prio_wait(struct spu_context *ctx)
 {
-	int prio = ctx->prio;
-	wait_queue_head_t *wq = &spu_prio->waitq[prio];
 	DEFINE_WAIT(wait);
 
-	if (ctx->spu)
-		return;
-
-	spu_add_wq(wq, &wait, prio);
+	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
 
 	if (!signal_pending(current)) {
 		mutex_unlock(&ctx->state_mutex);
-		pr_debug("%s: pid=%d prio=%d\n", __FUNCTION__,
-			 current->pid, current->prio);
 		schedule();
 		mutex_lock(&ctx->state_mutex);
 	}
-
-	spu_del_wq(wq, &wait, prio);
+	__set_current_state(TASK_RUNNING);
+	remove_wait_queue(&ctx->stop_wq, &wait);
 }
 
-static void spu_prio_wakeup(void)
+/**
+ * spu_reschedule - try to find a runnable context for a spu
+ * @spu: spu available
+ *
+ * This function is called whenever a spu becomes idle. It looks for the
+ * most suitable runnable spu context and schedules it for execution.
+ */
+static void spu_reschedule(struct spu *spu)
 {
-	int best = sched_find_first_bit(spu_prio->bitmap);
+	int best;
+
+	spu_free(spu);
+
+	spin_lock(&spu_prio->runq_lock);
+	best = sched_find_first_bit(spu_prio->bitmap);
 	if (best < MAX_PRIO) {
-		wait_queue_head_t *wq = &spu_prio->waitq[best];
-		wake_up_interruptible_nr(wq, 1);
+		struct spu_context *ctx = spu_grab_context(best);
+		if (ctx)
+			wake_up(&ctx->stop_wq);
 	}
+	spin_unlock(&spu_prio->runq_lock);
 }
 
-static struct spu *spu_get_idle(struct spu_context *ctx, u64 flags)
+static struct spu *spu_get_idle(struct spu_context *ctx)
 {
 	struct spu *spu = NULL;
 	int node = cpu_to_node(raw_smp_processor_id());
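
The waitqueue-per-priority scheme is gone: a context now queues itself on the runqueue and sleeps on its own ctx->stop_wq, while spu_reschedule() picks the single best waiter under runq_lock and wakes only it. The toy below models that handoff with POSIX threads; unlike the patch, it folds the idle check and the enqueue into one critical section, because a plain condition variable has no equivalent of the kernel's prepare_to_wait() ordering. Everything here is an illustrative stand-in, not spufs code (build with cc -pthread):

#include <pthread.h>
#include <stdio.h>

#define MAX_PRIO 4

struct waiter {
	int prio;
	int woken;
	pthread_cond_t wake;		/* plays the role of ctx->stop_wq */
	struct waiter *next;
};

static pthread_mutex_t runq_lock = PTHREAD_MUTEX_INITIALIZER;
static struct waiter *runq[MAX_PRIO];	/* one queue per priority */
static int free_spus;			/* idle "spus" not yet handed out */

/* Take an idle spu or enqueue ourselves, in one lock hold so no wakeup
 * can slip between the check and the enqueue. */
static int get_idle_or_enqueue(struct waiter *w)
{
	int got;

	pthread_mutex_lock(&runq_lock);
	got = free_spus > 0;
	if (got) {
		free_spus--;
	} else {
		w->next = runq[w->prio];	/* LIFO for brevity */
		runq[w->prio] = w;
	}
	pthread_mutex_unlock(&runq_lock);
	return got;
}

static void del_from_rq(struct waiter *w)	/* cf. spu_del_from_rq() */
{
	pthread_mutex_lock(&runq_lock);
	for (struct waiter **pp = &runq[w->prio]; *pp; pp = &(*pp)->next)
		if (*pp == w) {
			*pp = w->next;
			break;
		}
	pthread_mutex_unlock(&runq_lock);
}

static void reschedule(void)			/* cf. spu_reschedule() */
{
	pthread_mutex_lock(&runq_lock);
	free_spus++;				/* cf. spu_free() */
	for (int p = 0; p < MAX_PRIO; p++)	/* cf. sched_find_first_bit() */
		if (runq[p]) {
			runq[p]->woken = 1;	/* wake only the best waiter */
			pthread_cond_signal(&runq[p]->wake);
			break;
		}
	pthread_mutex_unlock(&runq_lock);
}

static void *activate(void *arg)		/* cf. spu_activate()'s loop */
{
	struct waiter *w = arg;

	while (!get_idle_or_enqueue(w)) {
		pthread_mutex_lock(&runq_lock);
		while (!w->woken)		/* predicate avoids lost wakeups */
			pthread_cond_wait(&w->wake, &runq_lock);
		w->woken = 0;
		pthread_mutex_unlock(&runq_lock);
		del_from_rq(w);
	}
	printf("prio %d context got an spu\n", w->prio);
	return NULL;
}

int main(void)
{
	struct waiter w = { .prio = 2, .wake = PTHREAD_COND_INITIALIZER };
	pthread_t t;

	pthread_create(&t, NULL, activate, &w);
	reschedule();	/* an spu becomes idle, before or after the waiter sleeps */
	pthread_join(t, NULL);
	return 0;
}
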
@@ -267,15 +298,6 @@ static struct spu *spu_get_idle(struct spu_context *ctx, u64 flags)
 	return spu;
 }
 
-static inline struct spu *spu_get(struct spu_context *ctx, u64 flags)
-{
-	/* Future: spu_get_idle() if possible,
-	 * otherwise try to preempt an active
-	 * context.
-	 */
-	return spu_get_idle(ctx, flags);
-}
-
 /* The three externally callable interfaces
  * for the scheduler begin here.
  *
@@ -284,32 +306,36 @@ static inline struct spu *spu_get(struct spu_context *ctx, u64 flags)
  * spu_yield - yield an SPU if others are waiting.
  */
 
+/**
+ * spu_activate - find a free spu for a context and execute it
+ * @ctx: spu context to schedule
+ * @flags: flags (currently ignored)
+ *
+ * Tries to find a free spu to run @ctx. If no free spu is available,
+ * add the context to the runqueue so it gets woken up once an spu
+ * is available.
+ */
 int spu_activate(struct spu_context *ctx, u64 flags)
 {
-	struct spu *spu;
-	int ret = 0;
 
-	for (;;) {
-		if (ctx->spu)
-			return 0;
-		spu = spu_get(ctx, flags);
-		if (spu != NULL) {
-			if (ctx->spu != NULL) {
-				spu_free(spu);
-				spu_prio_wakeup();
-				break;
-			}
+	if (ctx->spu)
+		return 0;
+
+	do {
+		struct spu *spu;
+
+		spu = spu_get_idle(ctx);
+		if (spu) {
 			spu_bind_context(spu, ctx);
-			break;
-		}
-		spu_prio_wait(ctx, flags);
-		if (signal_pending(current)) {
-			ret = -ERESTARTSYS;
-			spu_prio_wakeup();
-			break;
+			return 0;
 		}
-	}
-	return ret;
+
+		spu_add_to_rq(ctx);
+		spu_prio_wait(ctx);
+		spu_del_from_rq(ctx);
+	} while (!signal_pending(current));
+
+	return -ERESTARTSYS;
 }
 
 void spu_deactivate(struct spu_context *ctx)
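
Two parts of the new contract are visible in this hunk: spu_activate() must be entered with ctx->state_mutex held, since spu_prio_wait() drops and retakes it around schedule(), and -ERESTARTSYS propagates out so an interrupted syscall can be restarted after signal handling. A hedged sketch of a caller honoring that contract; acquire_runnable() is an illustrative name, not the actual spufs call site:

/* Illustrative caller: take the state mutex, make sure the context is
 * running on an spu, and pass -ERESTARTSYS up to the syscall layer. */
static int acquire_runnable(struct spu_context *ctx)
{
	int ret;

	mutex_lock(&ctx->state_mutex);
	ret = spu_activate(ctx, 0);	/* may sleep in spu_prio_wait() */
	if (ret)
		mutex_unlock(&ctx->state_mutex);
	return ret;	/* on 0, caller runs the context, then unlocks */
}
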
@@ -321,10 +347,8 @@ void spu_deactivate(struct spu_context *ctx)
 	if (!spu)
 		return;
 	was_active = spu_unbind_context(spu, ctx);
-	if (was_active) {
-		spu_free(spu);
-		spu_prio_wakeup();
-	}
+	if (was_active)
+		spu_reschedule(spu);
 }
 
 void spu_yield(struct spu_context *ctx)
@@ -359,7 +383,7 @@ int __init spu_sched_init(void)
 		return 1;
 	}
 	for (i = 0; i < MAX_PRIO; i++) {
-		init_waitqueue_head(&spu_prio->waitq[i]);
+		INIT_LIST_HEAD(&spu_prio->runq[i]);
 		__clear_bit(i, spu_prio->bitmap);
 	}
 	__set_bit(MAX_PRIO, spu_prio->bitmap);
@@ -367,6 +391,7 @@ int __init spu_sched_init(void)
 		mutex_init(&spu_prio->active_mutex[i]);
 		INIT_LIST_HEAD(&spu_prio->active_list[i]);
 	}
+	spin_lock_init(&spu_prio->runq_lock);
 	return 0;
 }
 
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index b500e94188b5..7f5a4fc03c00 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -76,6 +76,7 @@ struct spu_context {
 	struct spu_gang *gang;
 
 	/* scheduler fields */
+	struct list_head rq;
 	int prio;
 };
 
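
This single header change is what makes the list-based runqueue work: with struct list_head rq embedded in spu_context, the scheduler links contexts directly, and spu_grab_context() recovers the owning context from a list node via list_entry() (container_of() underneath). A self-contained user-space rendering of that recovery; the demo struct is a stand-in for struct spu_context:

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)

struct spu_context_demo {	/* stand-in for struct spu_context */
	int prio;
	struct list_head rq;	/* the member this hunk adds */
};

int main(void)
{
	struct spu_context_demo ctx = { .prio = 120 };
	struct list_head *node = &ctx.rq;

	/* Given only the embedded node, get back to the context: */
	struct spu_context_demo *back =
		list_entry(node, struct spu_context_demo, rq);
	printf("prio via list_entry: %d\n", back->prio);	/* 120 */
	return 0;
}
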