-rw-r--r--  arch/powerpc/platforms/cell/spu_base.c    |  72
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c | 198
-rw-r--r--  include/asm-powerpc/spu.h                 |  11
3 files changed, 112 insertions(+), 169 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 8617b507af49..90124228b8f4 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -409,7 +409,7 @@ static void spu_free_irqs(struct spu *spu)
                 free_irq(spu->irqs[2], spu);
 }
 
-static void spu_init_channels(struct spu *spu)
+void spu_init_channels(struct spu *spu)
 {
         static const struct {
                 unsigned channel;
@@ -442,66 +442,7 @@ static void spu_init_channels(struct spu *spu)
                 out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
         }
 }
-
-struct spu *spu_alloc_spu(struct spu *req_spu)
-{
-        struct spu *spu, *ret = NULL;
-
-        spin_lock(&spu_lock);
-        list_for_each_entry(spu, &cbe_spu_info[req_spu->node].free_spus, list) {
-                if (spu == req_spu) {
-                        list_del_init(&spu->list);
-                        pr_debug("Got SPU %d %d\n", spu->number, spu->node);
-                        spu_init_channels(spu);
-                        ret = spu;
-                        break;
-                }
-        }
-        spin_unlock(&spu_lock);
-        return ret;
-}
-EXPORT_SYMBOL_GPL(spu_alloc_spu);
-
-struct spu *spu_alloc_node(int node)
-{
-        struct spu *spu = NULL;
-
-        spin_lock(&spu_lock);
-        if (!list_empty(&cbe_spu_info[node].free_spus)) {
-                spu = list_entry(cbe_spu_info[node].free_spus.next, struct spu,
-                                                                        list);
-                list_del_init(&spu->list);
-                pr_debug("Got SPU %d %d\n", spu->number, spu->node);
-        }
-        spin_unlock(&spu_lock);
-
-        if (spu)
-                spu_init_channels(spu);
-        return spu;
-}
-EXPORT_SYMBOL_GPL(spu_alloc_node);
-
-struct spu *spu_alloc(void)
-{
-        struct spu *spu = NULL;
-        int node;
-
-        for (node = 0; node < MAX_NUMNODES; node++) {
-                spu = spu_alloc_node(node);
-                if (spu)
-                        break;
-        }
-
-        return spu;
-}
-
-void spu_free(struct spu *spu)
-{
-        spin_lock(&spu_lock);
-        list_add_tail(&spu->list, &cbe_spu_info[spu->node].free_spus);
-        spin_unlock(&spu_lock);
-}
-EXPORT_SYMBOL_GPL(spu_free);
+EXPORT_SYMBOL_GPL(spu_init_channels);
 
 static int spu_shutdown(struct sys_device *sysdev)
 {
@@ -597,6 +538,8 @@ static int __init create_spu(void *data)
         if (!spu)
                 goto out;
 
+        spu->alloc_state = SPU_FREE;
+
         spin_lock_init(&spu->register_lock);
         spin_lock(&spu_lock);
         spu->number = number++;
@@ -617,11 +560,10 @@ static int __init create_spu(void *data)
         if (ret)
                 goto out_free_irqs;
 
-        spin_lock(&spu_lock);
-        list_add(&spu->list, &cbe_spu_info[spu->node].free_spus);
+        mutex_lock(&cbe_spu_info[spu->node].list_mutex);
         list_add(&spu->cbe_list, &cbe_spu_info[spu->node].spus);
         cbe_spu_info[spu->node].n_spus++;
-        spin_unlock(&spu_lock);
+        mutex_unlock(&cbe_spu_info[spu->node].list_mutex);
 
         mutex_lock(&spu_full_list_mutex);
         spin_lock_irqsave(&spu_full_list_lock, flags);
@@ -831,8 +773,8 @@ static int __init init_spu_base(void)
         int i, ret = 0;
 
         for (i = 0; i < MAX_NUMNODES; i++) {
+                mutex_init(&cbe_spu_info[i].list_mutex);
                 INIT_LIST_HEAD(&cbe_spu_info[i].spus);
-                INIT_LIST_HEAD(&cbe_spu_info[i].free_spus);
         }
 
         if (!spu_management_ops)
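
Taken together, the create_spu() and init_spu_base() hunks above mean that a newly registered SPU is simply marked SPU_FREE and linked onto the per-node cbe_spu_info list under list_mutex; the global spu_lock no longer guards a separate free list. A condensed sketch of the resulting registration path, reconstructed from the added lines above (error handling and unrelated setup elided, not part of the patch itself):

        static int __init create_spu(void *data)
        {
                struct spu *spu;
                ...
                spu->alloc_state = SPU_FREE;    /* per-SPU state replaces the old free_spus list */
                ...
                mutex_lock(&cbe_spu_info[spu->node].list_mutex);
                list_add(&spu->cbe_list, &cbe_spu_info[spu->node].spus);
                cbe_spu_info[spu->node].n_spus++;
                mutex_unlock(&cbe_spu_info[spu->node].list_mutex);
                ...
        }
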
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 44e2338a05d5..227968b4779d 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -51,9 +51,6 @@ struct spu_prio_array {
         DECLARE_BITMAP(bitmap, MAX_PRIO);
         struct list_head runq[MAX_PRIO];
         spinlock_t runq_lock;
-        struct list_head active_list[MAX_NUMNODES];
-        struct mutex active_mutex[MAX_NUMNODES];
-        int nr_active[MAX_NUMNODES];
         int nr_waiting;
 };
 
@@ -127,7 +124,7 @@ void __spu_update_sched_info(struct spu_context *ctx)
         ctx->policy = current->policy;
 
         /*
-         * A lot of places that don't hold active_mutex poke into
+         * A lot of places that don't hold list_mutex poke into
          * cpus_allowed, including grab_runnable_context which
          * already holds the runq_lock. So abuse runq_lock
          * to protect this field aswell.
@@ -141,9 +138,9 @@ void spu_update_sched_info(struct spu_context *ctx)
 {
         int node = ctx->spu->node;
 
-        mutex_lock(&spu_prio->active_mutex[node]);
+        mutex_lock(&cbe_spu_info[node].list_mutex);
         __spu_update_sched_info(ctx);
-        mutex_unlock(&spu_prio->active_mutex[node]);
+        mutex_unlock(&cbe_spu_info[node].list_mutex);
 }
 
 static int __node_allowed(struct spu_context *ctx, int node)
@@ -169,39 +166,6 @@ static int node_allowed(struct spu_context *ctx, int node)
         return rval;
 }
 
-/**
- * spu_add_to_active_list - add spu to active list
- * @spu: spu to add to the active list
- */
-static void spu_add_to_active_list(struct spu *spu)
-{
-        int node = spu->node;
-
-        mutex_lock(&spu_prio->active_mutex[node]);
-        spu_prio->nr_active[node]++;
-        list_add_tail(&spu->list, &spu_prio->active_list[node]);
-        mutex_unlock(&spu_prio->active_mutex[node]);
-}
-
-static void __spu_remove_from_active_list(struct spu *spu)
-{
-        list_del_init(&spu->list);
-        spu_prio->nr_active[spu->node]--;
-}
-
-/**
- * spu_remove_from_active_list - remove spu from active list
- * @spu: spu to remove from the active list
- */
-static void spu_remove_from_active_list(struct spu *spu)
-{
-        int node = spu->node;
-
-        mutex_lock(&spu_prio->active_mutex[node]);
-        __spu_remove_from_active_list(spu);
-        mutex_unlock(&spu_prio->active_mutex[node]);
-}
-
 static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier);
 
 void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
@@ -222,15 +186,18 @@ static void notify_spus_active(void)
          */
         for_each_online_node(node) {
                 struct spu *spu;
-                mutex_lock(&spu_prio->active_mutex[node]);
-                list_for_each_entry(spu, &spu_prio->active_list[node], list) {
-                        struct spu_context *ctx = spu->ctx;
-                        set_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags);
-                        mb();   /* make sure any tasks woken up below */
-                        /* can see the bit(s) set above */
-                        wake_up_all(&ctx->stop_wq);
+
+                mutex_lock(&cbe_spu_info[node].list_mutex);
+                list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
+                        if (spu->alloc_state != SPU_FREE) {
+                                struct spu_context *ctx = spu->ctx;
+                                set_bit(SPU_SCHED_NOTIFY_ACTIVE,
+                                                &ctx->sched_flags);
+                                mb();
+                                wake_up_all(&ctx->stop_wq);
+                        }
                 }
-                mutex_unlock(&spu_prio->active_mutex[node]);
+                mutex_unlock(&cbe_spu_info[node].list_mutex);
         }
 }
 
@@ -293,10 +260,12 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
 }
 
 /*
- * XXX(hch): needs locking.
+ * Must be used with the list_mutex held.
  */
 static inline int sched_spu(struct spu *spu)
 {
+        BUG_ON(!mutex_is_locked(&cbe_spu_info[spu->node].list_mutex));
+
         return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED));
 }
 
@@ -349,11 +318,15 @@ static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
                 node = (node < MAX_NUMNODES) ? node : 0;
                 if (!node_allowed(ctx, node))
                         continue;
+                mutex_lock(&cbe_spu_info[node].list_mutex);
                 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
                         if ((!mem_aff || spu->has_mem_affinity) &&
-                                                        sched_spu(spu))
+                                                        sched_spu(spu)) {
+                                mutex_unlock(&cbe_spu_info[node].list_mutex);
                                 return spu;
+                        }
                 }
+                mutex_unlock(&cbe_spu_info[node].list_mutex);
         }
         return NULL;
 }
@@ -381,13 +354,14 @@ static void aff_set_ref_point_location(struct spu_gang *gang)
         gang->aff_ref_spu = aff_ref_location(ctx, mem_aff, gs, lowest_offset);
 }
 
-static struct spu *ctx_location(struct spu *ref, int offset)
+static struct spu *ctx_location(struct spu *ref, int offset, int node)
 {
         struct spu *spu;
 
         spu = NULL;
         if (offset >= 0) {
                 list_for_each_entry(spu, ref->aff_list.prev, aff_list) {
+                        BUG_ON(spu->node != node);
                         if (offset == 0)
                                 break;
                         if (sched_spu(spu))
@@ -395,12 +369,14 @@ static struct spu *ctx_location(struct spu *ref, int offset)
                 }
         } else {
                 list_for_each_entry_reverse(spu, ref->aff_list.next, aff_list) {
+                        BUG_ON(spu->node != node);
                         if (offset == 0)
                                 break;
                         if (sched_spu(spu))
                                 offset++;
                 }
         }
+
         return spu;
 }
 
@@ -408,13 +384,13 @@ static struct spu *ctx_location(struct spu *ref, int offset)
  * affinity_check is called each time a context is going to be scheduled.
  * It returns the spu ptr on which the context must run.
  */
-struct spu *affinity_check(struct spu_context *ctx)
+static int has_affinity(struct spu_context *ctx)
 {
-        struct spu_gang *gang;
+        struct spu_gang *gang = ctx->gang;
 
         if (list_empty(&ctx->aff_list))
-                return NULL;
-        gang = ctx->gang;
+                return 0;
+
         mutex_lock(&gang->aff_mutex);
         if (!gang->aff_ref_spu) {
                 if (!(gang->aff_flags & AFF_MERGED))
@@ -424,9 +400,8 @@ struct spu *affinity_check(struct spu_context *ctx)
                 aff_set_ref_point_location(gang);
         }
         mutex_unlock(&gang->aff_mutex);
-        if (!gang->aff_ref_spu)
-                return NULL;
-        return ctx_location(gang->aff_ref_spu, ctx->aff_offset);
+
+        return gang->aff_ref_spu != NULL;
 }
 
 /**
@@ -535,22 +510,41 @@ static void spu_prio_wait(struct spu_context *ctx)
 
 static struct spu *spu_get_idle(struct spu_context *ctx)
 {
-        struct spu *spu = NULL;
-        int node = cpu_to_node(raw_smp_processor_id());
-        int n;
+        struct spu *spu;
+        int node, n;
+
+        if (has_affinity(ctx)) {
+                node = ctx->gang->aff_ref_spu->node;
 
-        spu = affinity_check(ctx);
-        if (spu)
-                return spu_alloc_spu(spu);
+                mutex_lock(&cbe_spu_info[node].list_mutex);
+                spu = ctx_location(ctx->gang->aff_ref_spu, ctx->aff_offset, node);
+                if (spu && spu->alloc_state == SPU_FREE)
+                        goto found;
+                mutex_unlock(&cbe_spu_info[node].list_mutex);
+                return NULL;
+        }
 
+        node = cpu_to_node(raw_smp_processor_id());
         for (n = 0; n < MAX_NUMNODES; n++, node++) {
                 node = (node < MAX_NUMNODES) ? node : 0;
                 if (!node_allowed(ctx, node))
                         continue;
-                spu = spu_alloc_node(node);
-                if (spu)
-                        break;
+
+                mutex_lock(&cbe_spu_info[node].list_mutex);
+                list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
+                        if (spu->alloc_state == SPU_FREE)
+                                goto found;
+                }
+                mutex_unlock(&cbe_spu_info[node].list_mutex);
         }
+
+        return NULL;
+
+ found:
+        spu->alloc_state = SPU_USED;
+        mutex_unlock(&cbe_spu_info[node].list_mutex);
+        pr_debug("Got SPU %d %d\n", spu->number, spu->node);
+        spu_init_channels(spu);
         return spu;
 }
 
@@ -580,15 +574,15 @@ static struct spu *find_victim(struct spu_context *ctx)
                 if (!node_allowed(ctx, node))
                         continue;
 
-                mutex_lock(&spu_prio->active_mutex[node]);
-                list_for_each_entry(spu, &spu_prio->active_list[node], list) {
+                mutex_lock(&cbe_spu_info[node].list_mutex);
+                list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
                         struct spu_context *tmp = spu->ctx;
 
                         if (tmp->prio > ctx->prio &&
                             (!victim || tmp->prio > victim->prio))
                                 victim = spu->ctx;
                 }
-                mutex_unlock(&spu_prio->active_mutex[node]);
+                mutex_unlock(&cbe_spu_info[node].list_mutex);
 
                 if (victim) {
                         /*
@@ -613,7 +607,11 @@ static struct spu *find_victim(struct spu_context *ctx)
                                 victim = NULL;
                                 goto restart;
                         }
-                        spu_remove_from_active_list(spu);
+
+                        mutex_lock(&cbe_spu_info[node].list_mutex);
+                        cbe_spu_info[node].nr_active--;
+                        mutex_unlock(&cbe_spu_info[node].list_mutex);
+
                         spu_unbind_context(spu, victim);
                         victim->stats.invol_ctx_switch++;
                         spu->stats.invol_ctx_switch++;
@@ -662,8 +660,12 @@ int spu_activate(struct spu_context *ctx, unsigned long flags)
                 if (!spu && rt_prio(ctx->prio))
                         spu = find_victim(ctx);
                 if (spu) {
+                        int node = spu->node;
+
+                        mutex_lock(&cbe_spu_info[node].list_mutex);
                         spu_bind_context(spu, ctx);
-                        spu_add_to_active_list(spu);
+                        cbe_spu_info[node].nr_active++;
+                        mutex_unlock(&cbe_spu_info[node].list_mutex);
                         return 0;
                 }
 
@@ -712,11 +714,17 @@ static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
         if (spu) {
                 new = grab_runnable_context(max_prio, spu->node);
                 if (new || force) {
-                        spu_remove_from_active_list(spu);
+                        int node = spu->node;
+
+                        mutex_lock(&cbe_spu_info[node].list_mutex);
                         spu_unbind_context(spu, ctx);
+                        spu->alloc_state = SPU_FREE;
+                        cbe_spu_info[node].nr_active--;
+                        mutex_unlock(&cbe_spu_info[node].list_mutex);
+
                         ctx->stats.vol_ctx_switch++;
                         spu->stats.vol_ctx_switch++;
-                        spu_free(spu);
+
                         if (new)
                                 wake_up(&new->stop_wq);
                 }
@@ -755,7 +763,7 @@ void spu_yield(struct spu_context *ctx)
         }
 }
 
-static void spusched_tick(struct spu_context *ctx)
+static noinline void spusched_tick(struct spu_context *ctx)
 {
         if (ctx->flags & SPU_CREATE_NOSCHED)
                 return;
@@ -766,7 +774,7 @@ static void spusched_tick(struct spu_context *ctx)
                 return;
 
         /*
-         * Unfortunately active_mutex ranks outside of state_mutex, so
+         * Unfortunately list_mutex ranks outside of state_mutex, so
          * we have to trylock here. If we fail give the context another
          * tick and try again.
          */
@@ -776,12 +784,11 @@ static void spusched_tick(struct spu_context *ctx)
 
         new = grab_runnable_context(ctx->prio + 1, spu->node);
         if (new) {
-
-                __spu_remove_from_active_list(spu);
                 spu_unbind_context(spu, ctx);
                 ctx->stats.invol_ctx_switch++;
                 spu->stats.invol_ctx_switch++;
-                spu_free(spu);
+                spu->alloc_state = SPU_FREE;
+                cbe_spu_info[spu->node].nr_active--;
                 wake_up(&new->stop_wq);
                 /*
                  * We need to break out of the wait loop in
@@ -802,7 +809,7 @@ static void spusched_tick(struct spu_context *ctx)
  *
  * Return the number of tasks currently running or waiting to run.
  *
- * Note that we don't take runq_lock / active_mutex here. Reading
+ * Note that we don't take runq_lock / list_mutex here. Reading
  * a single 32bit value is atomic on powerpc, and we don't care
  * about memory ordering issues here.
  */
@@ -811,7 +818,7 @@ static unsigned long count_active_contexts(void)
         int nr_active = 0, node;
 
         for (node = 0; node < MAX_NUMNODES; node++)
-                nr_active += spu_prio->nr_active[node];
+                nr_active += cbe_spu_info[node].nr_active;
         nr_active += spu_prio->nr_waiting;
 
         return nr_active;
@@ -851,19 +858,18 @@ static void spusched_wake(unsigned long data)
 
 static int spusched_thread(void *unused)
 {
-        struct spu *spu, *next;
+        struct spu *spu;
         int node;
 
         while (!kthread_should_stop()) {
                 set_current_state(TASK_INTERRUPTIBLE);
                 schedule();
                 for (node = 0; node < MAX_NUMNODES; node++) {
-                        mutex_lock(&spu_prio->active_mutex[node]);
-                        list_for_each_entry_safe(spu, next,
-                                                 &spu_prio->active_list[node],
-                                                 list)
-                                spusched_tick(spu->ctx);
-                        mutex_unlock(&spu_prio->active_mutex[node]);
+                        mutex_lock(&cbe_spu_info[node].list_mutex);
+                        list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
+                                if (spu->ctx)
+                                        spusched_tick(spu->ctx);
+                        mutex_unlock(&cbe_spu_info[node].list_mutex);
                 }
         }
 
@@ -922,8 +928,8 @@ int __init spu_sched_init(void)
                 __clear_bit(i, spu_prio->bitmap);
         }
         for (i = 0; i < MAX_NUMNODES; i++) {
-                mutex_init(&spu_prio->active_mutex[i]);
-                INIT_LIST_HEAD(&spu_prio->active_list[i]);
+                mutex_init(&cbe_spu_info[i].list_mutex);
+                INIT_LIST_HEAD(&cbe_spu_info[i].spus);
         }
         spin_lock_init(&spu_prio->runq_lock);
 
@@ -954,7 +960,7 @@ int __init spu_sched_init(void)
 
 void spu_sched_exit(void)
 {
-        struct spu *spu, *tmp;
+        struct spu *spu;
         int node;
 
         remove_proc_entry("spu_loadavg", NULL);
@@ -963,13 +969,11 @@ void spu_sched_exit(void)
         kthread_stop(spusched_task);
 
         for (node = 0; node < MAX_NUMNODES; node++) {
-                mutex_lock(&spu_prio->active_mutex[node]);
-                list_for_each_entry_safe(spu, tmp, &spu_prio->active_list[node],
-                                         list) {
-                        list_del_init(&spu->list);
-                        spu_free(spu);
-                }
-                mutex_unlock(&spu_prio->active_mutex[node]);
+                mutex_lock(&cbe_spu_info[node].list_mutex);
+                list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
+                        if (spu->alloc_state != SPU_FREE)
+                                spu->alloc_state = SPU_FREE;
+                mutex_unlock(&cbe_spu_info[node].list_mutex);
         }
         kfree(spu_prio);
 }
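
Throughout sched.c the hunks above converge on one pattern for looking at SPUs: take the node's list_mutex, walk cbe_spu_info[node].spus via cbe_list, and check spu->alloc_state (or spu->ctx) to decide whether the SPU is in use. A generic sketch of that pattern follows; it is assembled from the changed lines above, and do_work() is a hypothetical stand-in for whatever per-context action the caller performs:

        /* Locking pattern now shared by notify_spus_active(), find_victim()
         * and spusched_thread(): list_mutex protects both the spus list and
         * each SPU's alloc_state on this node. */
        mutex_lock(&cbe_spu_info[node].list_mutex);
        list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
                if (spu->alloc_state != SPU_FREE)
                        do_work(spu->ctx);      /* hypothetical per-context work */
        }
        mutex_unlock(&cbe_spu_info[node].list_mutex);
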
diff --git a/include/asm-powerpc/spu.h b/include/asm-powerpc/spu.h
index a0f7fc8e23bb..8836c0f1f2f7 100644
--- a/include/asm-powerpc/spu.h
+++ b/include/asm-powerpc/spu.h
@@ -121,10 +121,9 @@ struct spu {
         unsigned long problem_phys;
         struct spu_problem __iomem *problem;
         struct spu_priv2 __iomem *priv2;
-        struct list_head list;
         struct list_head cbe_list;
-        struct list_head sched_list;
         struct list_head full_list;
+        enum { SPU_FREE, SPU_USED } alloc_state;
         int number;
         unsigned int irqs[3];
         u32 node;
@@ -187,18 +186,16 @@ struct spu {
 };
 
 struct cbe_spu_info {
+        struct mutex list_mutex;
         struct list_head spus;
-        struct list_head free_spus;
         int n_spus;
+        int nr_active;
         atomic_t reserved_spus;
 };
 
 extern struct cbe_spu_info cbe_spu_info[];
 
-struct spu *spu_alloc(void);
-struct spu *spu_alloc_node(int node);
-struct spu *spu_alloc_spu(struct spu *spu);
-void spu_free(struct spu *spu);
+void spu_init_channels(struct spu *spu);
 int spu_irq_class_0_bottom(struct spu *spu);
 int spu_irq_class_1_bottom(struct spu *spu);
 void spu_irq_setaffinity(struct spu *spu, int cpu);
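
For orientation, this is how the affected structures read after the spu.h hunks above; the field comments are explanatory annotations added here and are not part of the header itself:

        struct spu {
                ...
                struct list_head cbe_list;      /* link in cbe_spu_info[node].spus */
                struct list_head full_list;
                enum { SPU_FREE, SPU_USED } alloc_state; /* protected by list_mutex */
                ...
        };

        struct cbe_spu_info {
                struct mutex list_mutex;        /* protects spus, nr_active and each
                                                 * SPU's alloc_state on this node */
                struct list_head spus;          /* every SPU on the node, free or used */
                int n_spus;
                int nr_active;                  /* SPUs currently bound to a context */
                atomic_t reserved_spus;
        };
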