Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/platforms/cell/spu_base.c    | 21
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c |  5
2 files changed, 19 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index caaf2bf78cad..dd632e5feff3 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -41,7 +41,6 @@ EXPORT_SYMBOL_GPL(spu_management_ops);
 
 const struct spu_priv1_ops *spu_priv1_ops;
 
-static struct list_head spu_list[MAX_NUMNODES];
 static LIST_HEAD(spu_full_list);
 static DEFINE_MUTEX(spu_mutex);
 static DEFINE_SPINLOCK(spu_list_lock);
@@ -429,8 +428,9 @@ struct spu *spu_alloc_node(int node)
 	struct spu *spu = NULL;
 
 	mutex_lock(&spu_mutex);
-	if (!list_empty(&spu_list[node])) {
-		spu = list_entry(spu_list[node].next, struct spu, list);
+	if (!list_empty(&cbe_spu_info[node].free_spus)) {
+		spu = list_entry(cbe_spu_info[node].free_spus.next, struct spu,
+									list);
 		list_del_init(&spu->list);
 		pr_debug("Got SPU %d %d\n", spu->number, spu->node);
 	}
@@ -459,7 +459,7 @@ struct spu *spu_alloc(void)
 void spu_free(struct spu *spu)
 {
 	mutex_lock(&spu_mutex);
-	list_add_tail(&spu->list, &spu_list[spu->node]);
+	list_add_tail(&spu->list, &cbe_spu_info[spu->node].free_spus);
 	mutex_unlock(&spu_mutex);
 }
 EXPORT_SYMBOL_GPL(spu_free);
@@ -582,7 +582,9 @@ static int __init create_spu(void *data)
 
 	mutex_lock(&spu_mutex);
 	spin_lock_irqsave(&spu_list_lock, flags);
-	list_add(&spu->list, &spu_list[spu->node]);
+	list_add(&spu->list, &cbe_spu_info[spu->node].free_spus);
+	list_add(&spu->cbe_list, &cbe_spu_info[spu->node].spus);
+	cbe_spu_info[spu->node].n_spus++;
 	list_add(&spu->full_list, &spu_full_list);
 	spin_unlock_irqrestore(&spu_list_lock, flags);
 	mutex_unlock(&spu_mutex);
@@ -650,12 +652,17 @@ static ssize_t spu_stat_show(struct sys_device *sysdev, char *buf)
 
 static SYSDEV_ATTR(stat, 0644, spu_stat_show, NULL);
 
+struct cbe_spu_info cbe_spu_info[MAX_NUMNODES];
+EXPORT_SYMBOL_GPL(cbe_spu_info);
+
 static int __init init_spu_base(void)
 {
 	int i, ret = 0;
 
-	for (i = 0; i < MAX_NUMNODES; i++)
-		INIT_LIST_HEAD(&spu_list[i]);
+	for (i = 0; i < MAX_NUMNODES; i++) {
+		INIT_LIST_HEAD(&cbe_spu_info[i].spus);
+		INIT_LIST_HEAD(&cbe_spu_info[i].free_spus);
+	}
 
 	if (!spu_management_ops)
 		goto out;
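Note: the fields touched above (spus, free_spus, n_spus and, in the spufs scheduler hunks below, reserved_spus) imply per-node bookkeeping roughly like the following sketch. It is reconstructed from the accesses in this patch, not copied from the arch header, which may carry additional members.

/* Sketch of the per-node state implied by this patch; the real
 * definition lives in the spu header and may contain more fields. */
struct cbe_spu_info {
	struct list_head spus;		/* every SPU on the node, linked via spu->cbe_list */
	struct list_head free_spus;	/* idle SPUs, linked via spu->list */
	int n_spus;			/* SPUs discovered on the node */
	atomic_t reserved_spus;		/* SPUs held by SPU_CREATE_NOSCHED contexts */
};

extern struct cbe_spu_info cbe_spu_info[MAX_NUMNODES];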
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 12c09665404d..6d0ab72cc70e 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -231,6 +231,9 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
 			 spu->number, spu->node);
 	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
 
+	if (ctx->flags & SPU_CREATE_NOSCHED)
+		atomic_inc(&cbe_spu_info[spu->node].reserved_spus);
+
 	ctx->stats.slb_flt_base = spu->stats.slb_flt;
 	ctx->stats.class2_intr_base = spu->stats.class2_intr;
 
@@ -267,6 +270,8 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
 		 spu->pid, spu->number, spu->node);
 	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
 
+	if (spu->ctx->flags & SPU_CREATE_NOSCHED)
+		atomic_dec(&cbe_spu_info[spu->node].reserved_spus);
 	spu_switch_notify(spu, NULL);
 	spu_unmap_mappings(ctx);
 	spu_save(&ctx->csa, spu);
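For illustration only, a hypothetical consumer of the new reserved_spus counter: a scheduler-side check that skips nodes whose SPUs are all pinned by SPU_CREATE_NOSCHED contexts before falling back to spu_alloc_node(). Neither helper below exists in this patch; both are sketched from the fields it introduces.

/* Hypothetical: true if at least one SPU on the node is not reserved
 * by a SPU_CREATE_NOSCHED context. */
static int node_has_unreserved_spu(int node)
{
	return atomic_read(&cbe_spu_info[node].reserved_spus) <
		cbe_spu_info[node].n_spus;
}

/* Hypothetical: walk the nodes and take the first idle SPU from a node
 * that still has unreserved SPUs, popping it off free_spus via
 * spu_alloc_node(). */
static struct spu *grab_unreserved_spu(void)
{
	struct spu *spu;
	int node;

	for (node = 0; node < MAX_NUMNODES; node++) {
		if (!node_has_unreserved_spu(node))
			continue;
		spu = spu_alloc_node(node);
		if (spu)
			return spu;
	}
	return NULL;
}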