author     Christoph Hellwig <hch@lst.de>       2007-06-28 20:57:56 -0400
committer  Paul Mackerras <paulus@samba.org>    2007-07-03 01:24:45 -0400
commit     ea1ae5949d7fcd2e622226ba71741a0f43b6ef0a (patch)
tree       869e76ffe14b24f0d3aa92f9d6ed267a3326cc08 /arch/powerpc
parent     2cf2b3b49f10d2f4a0703070fc54ce1cd84a6cda (diff)
[POWERPC] spusched: fix cpu/node binding
Add a cpus_allowed field to struct spu_context so that we always use the
CPU mask of the owning thread instead of the mask of whichever thread
happens to call into the scheduler.  Also use this information in
grab_runnable_context to avoid spurious wakeups.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc')
-rw-r--r--   arch/powerpc/platforms/cell/spufs/context.c    2
-rw-r--r--   arch/powerpc/platforms/cell/spufs/sched.c     70
-rw-r--r--   arch/powerpc/platforms/cell/spufs/spufs.h      2
3 files changed, 52 insertions, 22 deletions
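
The heart of the patch is the per-context CPU mask: alloc_spu_context() and
__spu_update_sched_info() capture the owning thread's cpus_allowed into the
context, and the scheduler only considers a NUMA node whose CPUs intersect
that mask. Below is a minimal userspace sketch of that intersection test,
not kernel code; the 64-bit masks and the names ctx_model, node_cpumask and
node_allowed_model are hypothetical stand-ins for the kernel's cpumask_t,
node_to_cpumask() and cpus_intersects().

/*
 * Minimal userspace sketch (not kernel code) of the node-affinity test
 * introduced by __node_allowed(): a context may run on a NUMA node only if
 * the node's CPU mask intersects the cpus_allowed mask captured from the
 * owning thread.
 */
#include <stdint.h>
#include <stdio.h>

struct ctx_model {
	uint64_t cpus_allowed;		/* bit i set => CPU i allowed */
};

/* hypothetical stand-in for node_to_cpumask(): assume 4 CPUs per node */
static uint64_t node_cpumask(int node)
{
	return 0xfULL << (node * 4);
}

/* models __node_allowed(): non-zero iff the two masks intersect */
static int node_allowed_model(const struct ctx_model *ctx, int node)
{
	return (node_cpumask(node) & ctx->cpus_allowed) != 0;
}

int main(void)
{
	struct ctx_model ctx = { .cpus_allowed = 0x3 }; /* bound to CPUs 0-1 */

	printf("node 0: %d\n", node_allowed_model(&ctx, 0)); /* prints 1 */
	printf("node 1: %d\n", node_allowed_model(&ctx, 1)); /* prints 0 */
	return 0;
}
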
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index c778d9178e0f..6ff2a75589f3 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -53,7 +53,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
 	INIT_LIST_HEAD(&ctx->rq);
 	if (gang)
 		spu_gang_add_ctx(gang, ctx);
-
+	ctx->cpus_allowed = current->cpus_allowed;
 	spu_set_timeslice(ctx);
 	goto out;
 out_free:
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 3707c7fdbdee..69272620a6b7 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -112,6 +112,16 @@ void __spu_update_sched_info(struct spu_context *ctx)
 	else
 		ctx->prio = current->static_prio;
 	ctx->policy = current->policy;
+
+	/*
+	 * A lot of places that don't hold active_mutex poke into
+	 * cpus_allowed, including grab_runnable_context which
+	 * already holds the runq_lock.  So abuse runq_lock
+	 * to protect this field aswell.
+	 */
+	spin_lock(&spu_prio->runq_lock);
+	ctx->cpus_allowed = current->cpus_allowed;
+	spin_unlock(&spu_prio->runq_lock);
 }
 
 void spu_update_sched_info(struct spu_context *ctx)
@@ -123,16 +133,27 @@ void spu_update_sched_info(struct spu_context *ctx)
 	mutex_unlock(&spu_prio->active_mutex[node]);
 }
 
-static inline int node_allowed(int node)
+static int __node_allowed(struct spu_context *ctx, int node)
 {
-	cpumask_t mask;
+	if (nr_cpus_node(node)) {
+		cpumask_t mask = node_to_cpumask(node);
 
-	if (!nr_cpus_node(node))
-		return 0;
-	mask = node_to_cpumask(node);
-	if (!cpus_intersects(mask, current->cpus_allowed))
-		return 0;
-	return 1;
+		if (cpus_intersects(mask, ctx->cpus_allowed))
+			return 1;
+	}
+
+	return 0;
+}
+
+static int node_allowed(struct spu_context *ctx, int node)
+{
+	int rval;
+
+	spin_lock(&spu_prio->runq_lock);
+	rval = __node_allowed(ctx, node);
+	spin_unlock(&spu_prio->runq_lock);
+
+	return rval;
 }
 
 /**
@@ -289,7 +310,7 @@ static struct spu *spu_get_idle(struct spu_context *ctx)
 
 	for (n = 0; n < MAX_NUMNODES; n++, node++) {
 		node = (node < MAX_NUMNODES) ? node : 0;
-		if (!node_allowed(node))
+		if (!node_allowed(ctx, node))
 			continue;
 		spu = spu_alloc_node(node);
 		if (spu)
@@ -321,7 +342,7 @@ static struct spu *find_victim(struct spu_context *ctx)
 	node = cpu_to_node(raw_smp_processor_id());
 	for (n = 0; n < MAX_NUMNODES; n++, node++) {
 		node = (node < MAX_NUMNODES) ? node : 0;
-		if (!node_allowed(node))
+		if (!node_allowed(ctx, node))
 			continue;
 
 		mutex_lock(&spu_prio->active_mutex[node]);
@@ -416,23 +437,28 @@ int spu_activate(struct spu_context *ctx, unsigned long flags)
  * Remove the highest priority context on the runqueue and return it
  * to the caller. Returns %NULL if no runnable context was found.
  */
-static struct spu_context *grab_runnable_context(int prio)
+static struct spu_context *grab_runnable_context(int prio, int node)
 {
-	struct spu_context *ctx = NULL;
+	struct spu_context *ctx;
 	int best;
 
 	spin_lock(&spu_prio->runq_lock);
 	best = sched_find_first_bit(spu_prio->bitmap);
-	if (best < prio) {
+	while (best < prio) {
 		struct list_head *rq = &spu_prio->runq[best];
 
-		BUG_ON(list_empty(rq));
-
-		ctx = list_entry(rq->next, struct spu_context, rq);
-		__spu_del_from_rq(ctx);
+		list_for_each_entry(ctx, rq, rq) {
+			/* XXX(hch): check for affinity here aswell */
+			if (__node_allowed(ctx, node)) {
+				__spu_del_from_rq(ctx);
+				goto found;
+			}
+		}
+		best++;
 	}
+	ctx = NULL;
+ found:
 	spin_unlock(&spu_prio->runq_lock);
-
 	return ctx;
 }
 
@@ -442,7 +468,7 @@ static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
 	struct spu_context *new = NULL;
 
 	if (spu) {
-		new = grab_runnable_context(max_prio);
+		new = grab_runnable_context(max_prio, spu->node);
 		if (new || force) {
 			spu_remove_from_active_list(spu);
 			spu_unbind_context(spu, ctx);
@@ -496,9 +522,11 @@ static void spusched_tick(struct spu_context *ctx)
 	 * tick and try again.
 	 */
 	if (mutex_trylock(&ctx->state_mutex)) {
-		struct spu_context *new = grab_runnable_context(ctx->prio + 1);
+		struct spu *spu = ctx->spu;
+		struct spu_context *new;
+
+		new = grab_runnable_context(ctx->prio + 1, spu->node);
 		if (new) {
-			struct spu *spu = ctx->spu;
 
 			__spu_remove_from_active_list(spu);
 			spu_unbind_context(spu, ctx);
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index ff77f904fa31..98d3c18b2b6f 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -26,6 +26,7 @@
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
 #include <linux/fs.h>
+#include <linux/cpumask.h>
 
 #include <asm/spu.h>
 #include <asm/spu_csa.h>
@@ -80,6 +81,7 @@ struct spu_context {
 	struct list_head rq;
 	unsigned int time_slice;
 	unsigned long sched_flags;
+	cpumask_t cpus_allowed;
 	int policy;
 	int prio;
 };