Diffstat (limited to 'arch/powerpc/platforms/cell/spufs/sched.c')
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c | 361
 1 files changed, 242 insertions, 119 deletions
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 9ad53e637aee..00d914232af1 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -58,6 +58,7 @@ static unsigned long spu_avenrun[3];
 static struct spu_prio_array *spu_prio;
 static struct task_struct *spusched_task;
 static struct timer_list spusched_timer;
+static struct timer_list spuloadavg_timer;
 
 /*
  * Priority of a normal, non-rt, non-niced'd process (aka nice level 0).
@@ -105,15 +106,21 @@ void spu_set_timeslice(struct spu_context *ctx)
 void __spu_update_sched_info(struct spu_context *ctx)
 {
 	/*
-	 * 32-Bit assignment are atomic on powerpc, and we don't care about
-	 * memory ordering here because retriving the controlling thread is
-	 * per defintion racy.
+	 * assert that the context is not on the runqueue, so it is safe
+	 * to change its scheduling parameters.
+	 */
+	BUG_ON(!list_empty(&ctx->rq));
+
+	/*
+	 * 32-Bit assignments are atomic on powerpc, and we don't care about
+	 * memory ordering here because retrieving the controlling thread is
+	 * per definition racy.
 	 */
 	ctx->tid = current->pid;
 
 	/*
 	 * We do our own priority calculations, so we normally want
-	 * ->static_prio to start with. Unfortunately thies field
+	 * ->static_prio to start with. Unfortunately this field
 	 * contains junk for threads with a realtime scheduling
 	 * policy so we have to look at ->prio in this case.
 	 */
@@ -124,23 +131,32 @@ void __spu_update_sched_info(struct spu_context *ctx)
 	ctx->policy = current->policy;
 
 	/*
-	 * A lot of places that don't hold list_mutex poke into
-	 * cpus_allowed, including grab_runnable_context which
-	 * already holds the runq_lock. So abuse runq_lock
-	 * to protect this field aswell.
+	 * TO DO: the context may be loaded, so we may need to activate
+	 * it again on a different node. But it shouldn't hurt anything
+	 * to update its parameters, because we know that the scheduler
+	 * is not actively looking at this field, since it is not on the
+	 * runqueue. The context will be rescheduled on the proper node
+	 * if it is timesliced or preempted.
 	 */
-	spin_lock(&spu_prio->runq_lock);
 	ctx->cpus_allowed = current->cpus_allowed;
-	spin_unlock(&spu_prio->runq_lock);
 }
 
 void spu_update_sched_info(struct spu_context *ctx)
 {
-	int node = ctx->spu->node;
+	int node;
 
-	mutex_lock(&cbe_spu_info[node].list_mutex);
-	__spu_update_sched_info(ctx);
-	mutex_unlock(&cbe_spu_info[node].list_mutex);
+	if (ctx->state == SPU_STATE_RUNNABLE) {
+		node = ctx->spu->node;
+
+		/*
+		 * Take list_mutex to sync with find_victim().
+		 */
+		mutex_lock(&cbe_spu_info[node].list_mutex);
+		__spu_update_sched_info(ctx);
+		mutex_unlock(&cbe_spu_info[node].list_mutex);
+	} else {
+		__spu_update_sched_info(ctx);
+	}
 }
 
 static int __node_allowed(struct spu_context *ctx, int node)
@@ -174,7 +190,7 @@ void do_notify_spus_active(void)
 	 * Wake up the active spu_contexts.
 	 *
 	 * When the awakened processes see their "notify_active" flag is set,
-	 * they will call spu_switch_notify();
+	 * they will call spu_switch_notify().
 	 */
 	for_each_online_node(node) {
 		struct spu *spu;
@@ -221,7 +237,6 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
 	spu->wbox_callback = spufs_wbox_callback;
 	spu->stop_callback = spufs_stop_callback;
 	spu->mfc_callback = spufs_mfc_callback;
-	spu->dma_callback = spufs_dma_callback;
 	mb();
 	spu_unmap_mappings(ctx);
 	spu_restore(&ctx->csa, spu);
@@ -409,7 +424,6 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
 	spu->wbox_callback = NULL;
 	spu->stop_callback = NULL;
 	spu->mfc_callback = NULL;
-	spu->dma_callback = NULL;
 	spu_associate_mm(spu, NULL);
 	spu->pid = 0;
 	spu->tgid = 0;
@@ -454,6 +468,13 @@ static void __spu_add_to_rq(struct spu_context *ctx)
 	}
 }
 
+static void spu_add_to_rq(struct spu_context *ctx)
+{
+	spin_lock(&spu_prio->runq_lock);
+	__spu_add_to_rq(ctx);
+	spin_unlock(&spu_prio->runq_lock);
+}
+
 static void __spu_del_from_rq(struct spu_context *ctx)
 {
 	int prio = ctx->prio;
@@ -468,10 +489,24 @@ static void __spu_del_from_rq(struct spu_context *ctx)
 	}
 }
 
+void spu_del_from_rq(struct spu_context *ctx)
+{
+	spin_lock(&spu_prio->runq_lock);
+	__spu_del_from_rq(ctx);
+	spin_unlock(&spu_prio->runq_lock);
+}
+
 static void spu_prio_wait(struct spu_context *ctx)
 {
 	DEFINE_WAIT(wait);
 
+	/*
+	 * The caller must explicitly wait for a context to be loaded
+	 * if the nosched flag is set. If NOSCHED is not set, the caller
+	 * queues the context and waits for an spu event or error.
+	 */
+	BUG_ON(!(ctx->flags & SPU_CREATE_NOSCHED));
+
 	spin_lock(&spu_prio->runq_lock);
 	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
 	if (!signal_pending(current)) {
@@ -555,7 +590,7 @@ static struct spu *find_victim(struct spu_context *ctx)
 	/*
 	 * Look for a possible preemption candidate on the local node first.
 	 * If there is no candidate look at the other nodes. This isn't
-	 * exactly fair, but so far the whole spu schedule tries to keep
+	 * exactly fair, but so far the whole spu scheduler tries to keep
 	 * a strong node affinity. We might want to fine-tune this in
 	 * the future.
 	 */
@@ -571,6 +606,7 @@ static struct spu *find_victim(struct spu_context *ctx)
 			struct spu_context *tmp = spu->ctx;
 
 			if (tmp && tmp->prio > ctx->prio &&
+			    !(tmp->flags & SPU_CREATE_NOSCHED) &&
 			    (!victim || tmp->prio > victim->prio))
 				victim = spu->ctx;
 		}
@@ -582,6 +618,10 @@ static struct spu *find_victim(struct spu_context *ctx)
 			 * higher priority contexts before lower priority
 			 * ones, so this is safe until we introduce
 			 * priority inheritance schemes.
+			 *
+			 * XXX if the highest priority context is locked,
+			 * this can loop a long time. Might be better to
+			 * look at another context or give up after X retries.
 			 */
 			if (!mutex_trylock(&victim->state_mutex)) {
 				victim = NULL;
@@ -589,10 +629,10 @@ static struct spu *find_victim(struct spu_context *ctx)
 			}
 
 			spu = victim->spu;
-			if (!spu) {
+			if (!spu || victim->prio <= ctx->prio) {
 				/*
 				 * This race can happen because we've dropped
-				 * the active list mutex. No a problem, just
+				 * the active list mutex. Not a problem, just
 				 * restart the search.
 				 */
 				mutex_unlock(&victim->state_mutex);
@@ -607,13 +647,10 @@ static struct spu *find_victim(struct spu_context *ctx)
 
 			victim->stats.invol_ctx_switch++;
 			spu->stats.invol_ctx_switch++;
+			spu_add_to_rq(victim);
+
 			mutex_unlock(&victim->state_mutex);
-			/*
-			 * We need to break out of the wait loop in spu_run
-			 * manually to ensure this context gets put on the
-			 * runqueue again ASAP.
-			 */
-			wake_up(&victim->stop_wq);
+
 			return spu;
 		}
 	}
@@ -621,6 +658,50 @@ static struct spu *find_victim(struct spu_context *ctx)
 	return NULL;
 }
 
+static void __spu_schedule(struct spu *spu, struct spu_context *ctx)
+{
+	int node = spu->node;
+	int success = 0;
+
+	spu_set_timeslice(ctx);
+
+	mutex_lock(&cbe_spu_info[node].list_mutex);
+	if (spu->ctx == NULL) {
+		spu_bind_context(spu, ctx);
+		cbe_spu_info[node].nr_active++;
+		spu->alloc_state = SPU_USED;
+		success = 1;
+	}
+	mutex_unlock(&cbe_spu_info[node].list_mutex);
+
+	if (success)
+		wake_up_all(&ctx->run_wq);
+	else
+		spu_add_to_rq(ctx);
+}
+
+static void spu_schedule(struct spu *spu, struct spu_context *ctx)
+{
+	/* not a candidate for interruptible because it's called either
+	   from the scheduler thread or from spu_deactivate */
+	mutex_lock(&ctx->state_mutex);
+	__spu_schedule(spu, ctx);
+	spu_release(ctx);
+}
+
+static void spu_unschedule(struct spu *spu, struct spu_context *ctx)
+{
+	int node = spu->node;
+
+	mutex_lock(&cbe_spu_info[node].list_mutex);
+	cbe_spu_info[node].nr_active--;
+	spu->alloc_state = SPU_FREE;
+	spu_unbind_context(spu, ctx);
+	ctx->stats.invol_ctx_switch++;
+	spu->stats.invol_ctx_switch++;
+	mutex_unlock(&cbe_spu_info[node].list_mutex);
+}
+
 /**
  * spu_activate - find a free spu for a context and execute it
  * @ctx: spu context to schedule
@@ -632,39 +713,47 @@ static struct spu *find_victim(struct spu_context *ctx)
  */
 int spu_activate(struct spu_context *ctx, unsigned long flags)
 {
-	do {
-		struct spu *spu;
+	struct spu *spu;
 
 	/*
 	 * If there are multiple threads waiting for a single context
 	 * only one actually binds the context while the others will
 	 * only be able to acquire the state_mutex once the context
 	 * already is in runnable state.
 	 */
 	if (ctx->spu)
 		return 0;
 
-	spu = spu_get_idle(ctx);
-	/*
-	 * If this is a realtime thread we try to get it running by
-	 * preempting a lower priority thread.
-	 */
-	if (!spu && rt_prio(ctx->prio))
-		spu = find_victim(ctx);
-	if (spu) {
-		int node = spu->node;
+spu_activate_top:
+	if (signal_pending(current))
+		return -ERESTARTSYS;
 
-		mutex_lock(&cbe_spu_info[node].list_mutex);
-		spu_bind_context(spu, ctx);
-		cbe_spu_info[node].nr_active++;
-		mutex_unlock(&cbe_spu_info[node].list_mutex);
-		return 0;
-	}
+	spu = spu_get_idle(ctx);
+	/*
+	 * If this is a realtime thread we try to get it running by
+	 * preempting a lower priority thread.
+	 */
+	if (!spu && rt_prio(ctx->prio))
+		spu = find_victim(ctx);
+	if (spu) {
+		unsigned long runcntl;
+
+		runcntl = ctx->ops->runcntl_read(ctx);
+		__spu_schedule(spu, ctx);
+		if (runcntl & SPU_RUNCNTL_RUNNABLE)
+			spuctx_switch_state(ctx, SPU_UTIL_USER);
 
+		return 0;
+	}
+
+	if (ctx->flags & SPU_CREATE_NOSCHED) {
 		spu_prio_wait(ctx);
-	} while (!signal_pending(current));
+		goto spu_activate_top;
+	}
 
-	return -ERESTARTSYS;
+	spu_add_to_rq(ctx);
+
+	return 0;
 }
 
 /**
@@ -706,21 +795,19 @@ static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
 	if (spu) {
 		new = grab_runnable_context(max_prio, spu->node);
 		if (new || force) {
-			int node = spu->node;
-
-			mutex_lock(&cbe_spu_info[node].list_mutex);
-			spu_unbind_context(spu, ctx);
-			spu->alloc_state = SPU_FREE;
-			cbe_spu_info[node].nr_active--;
-			mutex_unlock(&cbe_spu_info[node].list_mutex);
-
-			ctx->stats.vol_ctx_switch++;
-			spu->stats.vol_ctx_switch++;
-
-			if (new)
-				wake_up(&new->stop_wq);
+			spu_unschedule(spu, ctx);
+			if (new) {
+				if (new->flags & SPU_CREATE_NOSCHED)
+					wake_up(&new->stop_wq);
+				else {
+					spu_release(ctx);
+					spu_schedule(spu, new);
+					/* this one can't easily be made
+					   interruptible */
+					mutex_lock(&ctx->state_mutex);
+				}
+			}
 		}
-
 	}
 
 	return new != NULL;
@@ -757,43 +844,38 @@ void spu_yield(struct spu_context *ctx)
 
 static noinline void spusched_tick(struct spu_context *ctx)
 {
+	struct spu_context *new = NULL;
+	struct spu *spu = NULL;
+	u32 status;
+
+	if (spu_acquire(ctx))
+		BUG();	/* a kernel thread never has signals pending */
+
+	if (ctx->state != SPU_STATE_RUNNABLE)
+		goto out;
+	if (spu_stopped(ctx, &status))
+		goto out;
 	if (ctx->flags & SPU_CREATE_NOSCHED)
-		return;
+		goto out;
 	if (ctx->policy == SCHED_FIFO)
-		return;
+		goto out;
 
 	if (--ctx->time_slice)
-		return;
+		goto out;
 
-	/*
-	 * Unfortunately list_mutex ranks outside of state_mutex, so
-	 * we have to trylock here. If we fail give the context another
-	 * tick and try again.
-	 */
-	if (mutex_trylock(&ctx->state_mutex)) {
-		struct spu *spu = ctx->spu;
-		struct spu_context *new;
-
-		new = grab_runnable_context(ctx->prio + 1, spu->node);
-		if (new) {
-			spu_unbind_context(spu, ctx);
-			ctx->stats.invol_ctx_switch++;
-			spu->stats.invol_ctx_switch++;
-			spu->alloc_state = SPU_FREE;
-			cbe_spu_info[spu->node].nr_active--;
-			wake_up(&new->stop_wq);
-			/*
-			 * We need to break out of the wait loop in
-			 * spu_run manually to ensure this context
-			 * gets put on the runqueue again ASAP.
-			 */
-			wake_up(&ctx->stop_wq);
-		}
-		spu_set_timeslice(ctx);
-		mutex_unlock(&ctx->state_mutex);
+	spu = ctx->spu;
+	new = grab_runnable_context(ctx->prio + 1, spu->node);
+	if (new) {
+		spu_unschedule(spu, ctx);
+		spu_add_to_rq(ctx);
 	} else {
 		ctx->time_slice++;
 	}
+out:
+	spu_release(ctx);
+
+	if (new)
+		spu_schedule(spu, new);
 }
 
 /**
@@ -817,35 +899,31 @@ static unsigned long count_active_contexts(void)
 }
 
 /**
- * spu_calc_load - given tick count, update the avenrun load estimates.
- * @tick: tick count
+ * spu_calc_load - update the avenrun load estimates.
  *
  * No locking against reading these values from userspace, as for
  * the CPU loadavg code.
  */
-static void spu_calc_load(unsigned long ticks)
+static void spu_calc_load(void)
 {
 	unsigned long active_tasks; /* fixed-point */
-	static int count = LOAD_FREQ;
-
-	count -= ticks;
-
-	if (unlikely(count < 0)) {
-		active_tasks = count_active_contexts() * FIXED_1;
-		do {
-			CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
-			CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
-			CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
-			count += LOAD_FREQ;
-		} while (count < 0);
-	}
+
+	active_tasks = count_active_contexts() * FIXED_1;
+	CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
+	CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
+	CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
 }
 
 static void spusched_wake(unsigned long data)
 {
 	mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
 	wake_up_process(spusched_task);
-	spu_calc_load(SPUSCHED_TICK);
+}
+
+static void spuloadavg_wake(unsigned long data)
+{
+	mod_timer(&spuloadavg_timer, jiffies + LOAD_FREQ);
+	spu_calc_load();
 }
 
 static int spusched_thread(void *unused)
@@ -857,17 +935,58 @@ static int spusched_thread(void *unused)
 		set_current_state(TASK_INTERRUPTIBLE);
 		schedule();
 		for (node = 0; node < MAX_NUMNODES; node++) {
-			mutex_lock(&cbe_spu_info[node].list_mutex);
-			list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
-				if (spu->ctx)
-					spusched_tick(spu->ctx);
-			mutex_unlock(&cbe_spu_info[node].list_mutex);
+			struct mutex *mtx = &cbe_spu_info[node].list_mutex;
+
+			mutex_lock(mtx);
+			list_for_each_entry(spu, &cbe_spu_info[node].spus,
+					cbe_list) {
+				struct spu_context *ctx = spu->ctx;
+
+				if (ctx) {
+					mutex_unlock(mtx);
+					spusched_tick(ctx);
+					mutex_lock(mtx);
+				}
+			}
+			mutex_unlock(mtx);
 		}
 	}
 
 	return 0;
 }
 
+void spuctx_switch_state(struct spu_context *ctx,
+		enum spu_utilization_state new_state)
+{
+	unsigned long long curtime;
+	signed long long delta;
+	struct timespec ts;
+	struct spu *spu;
+	enum spu_utilization_state old_state;
+
+	ktime_get_ts(&ts);
+	curtime = timespec_to_ns(&ts);
+	delta = curtime - ctx->stats.tstamp;
+
+	WARN_ON(!mutex_is_locked(&ctx->state_mutex));
+	WARN_ON(delta < 0);
+
+	spu = ctx->spu;
+	old_state = ctx->stats.util_state;
+	ctx->stats.util_state = new_state;
+	ctx->stats.tstamp = curtime;
+
+	/*
+	 * Update the physical SPU utilization statistics.
+	 */
+	if (spu) {
+		ctx->stats.times[old_state] += delta;
+		spu->stats.times[old_state] += delta;
+		spu->stats.util_state = new_state;
+		spu->stats.tstamp = curtime;
+	}
+}
+
 #define LOAD_INT(x) ((x) >> FSHIFT)
 #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
 
@@ -881,7 +1000,7 @@ static int show_spu_loadavg(struct seq_file *s, void *private)
 
 	/*
 	 * Note that last_pid doesn't really make much sense for the
-	 * SPU loadavg (it even seems very odd on the CPU side..),
+	 * SPU loadavg (it even seems very odd on the CPU side...),
 	 * but we include it here to have a 100% compatible interface.
 	 */
 	seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
@@ -922,6 +1041,7 @@ int __init spu_sched_init(void)
 	spin_lock_init(&spu_prio->runq_lock);
 
 	setup_timer(&spusched_timer, spusched_wake, 0);
+	setup_timer(&spuloadavg_timer, spuloadavg_wake, 0);
 
 	spusched_task = kthread_run(spusched_thread, NULL, "spusched");
 	if (IS_ERR(spusched_task)) {
@@ -929,6 +1049,8 @@ int __init spu_sched_init(void)
 		goto out_free_spu_prio;
 	}
 
+	mod_timer(&spuloadavg_timer, 0);
+
 	entry = create_proc_entry("spu_loadavg", 0, NULL);
 	if (!entry)
 		goto out_stop_kthread;
@@ -954,6 +1076,7 @@ void spu_sched_exit(void)
 	remove_proc_entry("spu_loadavg", NULL);
 
 	del_timer_sync(&spusched_timer);
+	del_timer_sync(&spuloadavg_timer);
 	kthread_stop(spusched_task);
 
 	for (node = 0; node < MAX_NUMNODES; node++) {