aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorArnd Bergmann <arnd@arndb.de>2007-07-20 15:39:44 -0400
committerArnd Bergmann <arnd@klappe.arndb.de>2007-07-20 15:42:11 -0400
commitaa6d5b20254a21b69092dd839b70ee148303ef25 (patch)
tree0519330e2e4eb6360866dbf2104ed987e4bdd2c0 /arch
parent7e90b74967ea54dbd6eb539e1cb151ec37f63d7f (diff)
[CELL] cell: add per BE structure with info about its SPUs
Addition of a spufs-global "cbe_info" array. Each entry contains information about one Cell/B.E. node, namely: * list of spus (both free and busy spus are in this list); * list of free spus (replacing the static spu_list from spu_base.c) * number of spus; * number of reserved (non-schedulable) spus. SPE affinity implementation actually requires only access to one spu per BE node (since it implements its own pointer to walk through the other spus of the ring) and the number of schedulable spus (n_spus - non_sched_spus) However having this more general structure can be useful for other functionalities, concentrating per-cbe statistics / data. Signed-off-by: Andre Detsch <adetsch@br.ibm.com> Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Diffstat (limited to 'arch')
-rw-r--r--arch/powerpc/platforms/cell/spu_base.c21
-rw-r--r--arch/powerpc/platforms/cell/spufs/sched.c5
2 files changed, 19 insertions, 7 deletions
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index caaf2bf78cad..dd632e5feff3 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -41,7 +41,6 @@ EXPORT_SYMBOL_GPL(spu_management_ops);
41 41
42const struct spu_priv1_ops *spu_priv1_ops; 42const struct spu_priv1_ops *spu_priv1_ops;
43 43
44static struct list_head spu_list[MAX_NUMNODES];
45static LIST_HEAD(spu_full_list); 44static LIST_HEAD(spu_full_list);
46static DEFINE_MUTEX(spu_mutex); 45static DEFINE_MUTEX(spu_mutex);
47static DEFINE_SPINLOCK(spu_list_lock); 46static DEFINE_SPINLOCK(spu_list_lock);
@@ -429,8 +428,9 @@ struct spu *spu_alloc_node(int node)
429 struct spu *spu = NULL; 428 struct spu *spu = NULL;
430 429
431 mutex_lock(&spu_mutex); 430 mutex_lock(&spu_mutex);
432 if (!list_empty(&spu_list[node])) { 431 if (!list_empty(&cbe_spu_info[node].free_spus)) {
433 spu = list_entry(spu_list[node].next, struct spu, list); 432 spu = list_entry(cbe_spu_info[node].free_spus.next, struct spu,
433 list);
434 list_del_init(&spu->list); 434 list_del_init(&spu->list);
435 pr_debug("Got SPU %d %d\n", spu->number, spu->node); 435 pr_debug("Got SPU %d %d\n", spu->number, spu->node);
436 } 436 }
@@ -459,7 +459,7 @@ struct spu *spu_alloc(void)
459void spu_free(struct spu *spu) 459void spu_free(struct spu *spu)
460{ 460{
461 mutex_lock(&spu_mutex); 461 mutex_lock(&spu_mutex);
462 list_add_tail(&spu->list, &spu_list[spu->node]); 462 list_add_tail(&spu->list, &cbe_spu_info[spu->node].free_spus);
463 mutex_unlock(&spu_mutex); 463 mutex_unlock(&spu_mutex);
464} 464}
465EXPORT_SYMBOL_GPL(spu_free); 465EXPORT_SYMBOL_GPL(spu_free);
@@ -582,7 +582,9 @@ static int __init create_spu(void *data)
582 582
583 mutex_lock(&spu_mutex); 583 mutex_lock(&spu_mutex);
584 spin_lock_irqsave(&spu_list_lock, flags); 584 spin_lock_irqsave(&spu_list_lock, flags);
585 list_add(&spu->list, &spu_list[spu->node]); 585 list_add(&spu->list, &cbe_spu_info[spu->node].free_spus);
586 list_add(&spu->cbe_list, &cbe_spu_info[spu->node].spus);
587 cbe_spu_info[spu->node].n_spus++;
586 list_add(&spu->full_list, &spu_full_list); 588 list_add(&spu->full_list, &spu_full_list);
587 spin_unlock_irqrestore(&spu_list_lock, flags); 589 spin_unlock_irqrestore(&spu_list_lock, flags);
588 mutex_unlock(&spu_mutex); 590 mutex_unlock(&spu_mutex);
@@ -650,12 +652,17 @@ static ssize_t spu_stat_show(struct sys_device *sysdev, char *buf)
650 652
651static SYSDEV_ATTR(stat, 0644, spu_stat_show, NULL); 653static SYSDEV_ATTR(stat, 0644, spu_stat_show, NULL);
652 654
655struct cbe_spu_info cbe_spu_info[MAX_NUMNODES];
656EXPORT_SYMBOL_GPL(cbe_spu_info);
657
653static int __init init_spu_base(void) 658static int __init init_spu_base(void)
654{ 659{
655 int i, ret = 0; 660 int i, ret = 0;
656 661
657 for (i = 0; i < MAX_NUMNODES; i++) 662 for (i = 0; i < MAX_NUMNODES; i++) {
658 INIT_LIST_HEAD(&spu_list[i]); 663 INIT_LIST_HEAD(&cbe_spu_info[i].spus);
664 INIT_LIST_HEAD(&cbe_spu_info[i].free_spus);
665 }
659 666
660 if (!spu_management_ops) 667 if (!spu_management_ops)
661 goto out; 668 goto out;
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 12c09665404d..6d0ab72cc70e 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -231,6 +231,9 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
231 spu->number, spu->node); 231 spu->number, spu->node);
232 spuctx_switch_state(ctx, SPU_UTIL_SYSTEM); 232 spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
233 233
234 if (ctx->flags & SPU_CREATE_NOSCHED)
235 atomic_inc(&cbe_spu_info[spu->node].reserved_spus);
236
234 ctx->stats.slb_flt_base = spu->stats.slb_flt; 237 ctx->stats.slb_flt_base = spu->stats.slb_flt;
235 ctx->stats.class2_intr_base = spu->stats.class2_intr; 238 ctx->stats.class2_intr_base = spu->stats.class2_intr;
236 239
@@ -267,6 +270,8 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
267 spu->pid, spu->number, spu->node); 270 spu->pid, spu->number, spu->node);
268 spuctx_switch_state(ctx, SPU_UTIL_SYSTEM); 271 spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
269 272
273 if (spu->ctx->flags & SPU_CREATE_NOSCHED)
274 atomic_dec(&cbe_spu_info[spu->node].reserved_spus);
270 spu_switch_notify(spu, NULL); 275 spu_switch_notify(spu, NULL);
271 spu_unmap_mappings(ctx); 276 spu_unmap_mappings(ctx);
272 spu_save(&ctx->csa, spu); 277 spu_save(&ctx->csa, spu);