diff options
author | Christoph Hellwig <hch@lst.de> | 2007-02-13 15:54:27 -0500 |
---|---|---|
committer | Arnd Bergmann <arnd@klappe.arndb.de> | 2007-02-13 15:55:42 -0500 |
commit | 52f04fcf66a5d5d90790d6cfde52e391ecf2b882 (patch) | |
tree | 1fe7e3220964ddff413975fd76fde0e7fcb4fd95 /arch/powerpc | |
parent | ae7b4c5284d11d49ed9432c16505fcbeb8d3b8cf (diff) |
[POWERPC] spu sched: forced preemption at execution
If we start a spu context with realtime priority we want it to run
immediately and not wait until some other lower priority thread has
finished. Try to find a suitable victim and use its spu in this
case.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Diffstat (limited to 'arch/powerpc')
-rw-r--r-- | arch/powerpc/platforms/cell/spufs/context.c | 1 | ||||
-rw-r--r-- | arch/powerpc/platforms/cell/spufs/sched.c | 74 | ||||
-rw-r--r-- | arch/powerpc/platforms/cell/spufs/spufs.h | 1 |
3 files changed, 76 insertions, 0 deletions
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c index 056a8ad02385..d581f4ec99ba 100644 --- a/arch/powerpc/platforms/cell/spufs/context.c +++ b/arch/powerpc/platforms/cell/spufs/context.c | |||
@@ -53,6 +53,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang) | |||
53 | ctx->owner = get_task_mm(current); | 53 | ctx->owner = get_task_mm(current); |
54 | if (gang) | 54 | if (gang) |
55 | spu_gang_add_ctx(gang, ctx); | 55 | spu_gang_add_ctx(gang, ctx); |
56 | ctx->rt_priority = current->rt_priority; | ||
56 | ctx->prio = current->prio; | 57 | ctx->prio = current->prio; |
57 | goto out; | 58 | goto out; |
58 | out_free: | 59 | out_free: |
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index eb06a030ca09..814f65e025f1 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c | |||
@@ -282,6 +282,74 @@ static struct spu *spu_get_idle(struct spu_context *ctx) | |||
282 | } | 282 | } |
283 | 283 | ||
284 | /** | 284 | /** |
285 | * find_victim - find a lower priority context to preempt | ||
286 | * @ctx: candidate context for running | ||
287 | * | ||
288 | * Returns the freed physical spu to run the new context on. | ||
289 | */ | ||
290 | static struct spu *find_victim(struct spu_context *ctx) | ||
291 | { | ||
292 | struct spu_context *victim = NULL; | ||
293 | struct spu *spu; | ||
294 | int node, n; | ||
295 | |||
296 | /* | ||
297 | * Look for a possible preemption candidate on the local node first. | ||
298 | * If there is no candidate look at the other nodes. This isn't | ||
299 | * exactly fair, but so far the whole spu scheduler tries to keep | ||
300 | * a strong node affinity. We might want to fine-tune this in | ||
301 | * the future. | ||
302 | */ | ||
303 | restart: | ||
304 | node = cpu_to_node(raw_smp_processor_id()); | ||
305 | for (n = 0; n < MAX_NUMNODES; n++, node++) { | ||
306 | node = (node < MAX_NUMNODES) ? node : 0; | ||
307 | if (!node_allowed(node)) | ||
308 | continue; | ||
309 | |||
310 | mutex_lock(&spu_prio->active_mutex[node]); | ||
311 | list_for_each_entry(spu, &spu_prio->active_list[node], list) { | ||
312 | struct spu_context *tmp = spu->ctx; | ||
313 | |||
314 | if (tmp->rt_priority < ctx->rt_priority && | ||
315 | (!victim || tmp->rt_priority < victim->rt_priority)) | ||
316 | victim = spu->ctx; | ||
317 | } | ||
318 | mutex_unlock(&spu_prio->active_mutex[node]); | ||
319 | |||
320 | if (victim) { | ||
321 | /* | ||
322 | * This nests ctx->state_mutex, but we always lock | ||
323 | * higher priority contexts before lower priority | ||
324 | * ones, so this is safe until we introduce | ||
325 | * priority inheritance schemes. | ||
326 | */ | ||
327 | if (!mutex_trylock(&victim->state_mutex)) { | ||
328 | victim = NULL; | ||
329 | goto restart; | ||
330 | } | ||
331 | |||
332 | spu = victim->spu; | ||
333 | if (!spu) { | ||
334 | /* | ||
335 | * This race can happen because we've dropped | ||
336 | * the active list mutex. Not a problem, just | ||
337 | * restart the search. | ||
338 | */ | ||
339 | mutex_unlock(&victim->state_mutex); | ||
340 | victim = NULL; | ||
341 | goto restart; | ||
342 | } | ||
343 | spu_unbind_context(spu, victim); | ||
344 | mutex_unlock(&victim->state_mutex); | ||
345 | return spu; | ||
346 | } | ||
347 | } | ||
348 | |||
349 | return NULL; | ||
350 | } | ||
351 | |||
352 | /** | ||
285 | * spu_activate - find a free spu for a context and execute it | 353 | * spu_activate - find a free spu for a context and execute it |
286 | * @ctx: spu context to schedule | 354 | * @ctx: spu context to schedule |
287 | * @flags: flags (currently ignored) | 355 | * @flags: flags (currently ignored) |
@@ -300,6 +368,12 @@ int spu_activate(struct spu_context *ctx, unsigned long flags) | |||
300 | struct spu *spu; | 368 | struct spu *spu; |
301 | 369 | ||
302 | spu = spu_get_idle(ctx); | 370 | spu = spu_get_idle(ctx); |
371 | /* | ||
372 | * If this is a realtime thread we try to get it running by | ||
373 | * preempting a lower priority thread. | ||
374 | */ | ||
375 | if (!spu && ctx->rt_priority) | ||
376 | spu = find_victim(ctx); | ||
303 | if (spu) { | 377 | if (spu) { |
304 | spu_bind_context(spu, ctx); | 378 | spu_bind_context(spu, ctx); |
305 | return 0; | 379 | return 0; |
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h index 421f59167c55..85b182d16464 100644 --- a/arch/powerpc/platforms/cell/spufs/spufs.h +++ b/arch/powerpc/platforms/cell/spufs/spufs.h | |||
@@ -83,6 +83,7 @@ struct spu_context { | |||
83 | /* scheduler fields */ | 83 | /* scheduler fields */ |
84 | struct list_head rq; | 84 | struct list_head rq; |
85 | unsigned long sched_flags; | 85 | unsigned long sched_flags; |
86 | unsigned long rt_priority; | ||
86 | int prio; | 87 | int prio; |
87 | }; | 88 | }; |
88 | 89 | ||