 arch/powerpc/platforms/cell/spufs/sched.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 4d257b3f9336..0117eb8f6a91 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -105,15 +105,15 @@ void spu_set_timeslice(struct spu_context *ctx)
 void __spu_update_sched_info(struct spu_context *ctx)
 {
 	/*
-	 * 32-Bit assignment are atomic on powerpc, and we don't care about
-	 * memory ordering here because retriving the controlling thread is
-	 * per defintion racy.
+	 * 32-Bit assignments are atomic on powerpc, and we don't care about
+	 * memory ordering here because retrieving the controlling thread is
+	 * per definition racy.
 	 */
 	ctx->tid = current->pid;
 
 	/*
 	 * We do our own priority calculations, so we normally want
-	 * ->static_prio to start with. Unfortunately thies field
+	 * ->static_prio to start with. Unfortunately this field
 	 * contains junk for threads with a realtime scheduling
 	 * policy so we have to look at ->prio in this case.
 	 */
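For context on the ->static_prio vs ->prio remark: a minimal sketch of the priority selection the comment describes, assuming the standard kernel helpers (current, rt_prio()) and the spufs struct spu_context. The function name is hypothetical and this is not the verbatim continuation of __spu_update_sched_info().

/* Sketch only: choose a priority for the context as the comment describes. */
static void sketch_update_prio(struct spu_context *ctx)
{
	if (rt_prio(current->prio))		/* SCHED_FIFO/SCHED_RR: ->static_prio is junk */
		ctx->prio = current->prio;
	else					/* normal tasks: use the static priority */
		ctx->prio = current->static_prio;
	ctx->policy = current->policy;
}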
@@ -127,7 +127,7 @@ void __spu_update_sched_info(struct spu_context *ctx)
 	 * A lot of places that don't hold list_mutex poke into
 	 * cpus_allowed, including grab_runnable_context which
 	 * already holds the runq_lock. So abuse runq_lock
-	 * to protect this field aswell.
+	 * to protect this field as well.
 	 */
 	spin_lock(&spu_prio->runq_lock);
 	ctx->cpus_allowed = current->cpus_allowed;
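The lock/copy pair visible above is essentially the whole critical section the comment is justifying; a hedged sketch of the complete pattern (the matching unlock sits just past the end of this hunk), using only the symbols already shown here:

/* Sketch: snapshot the allowed-CPU mask under runq_lock so readers that
 * only take runq_lock (e.g. grab_runnable_context) see a consistent value. */
spin_lock(&spu_prio->runq_lock);
ctx->cpus_allowed = current->cpus_allowed;
spin_unlock(&spu_prio->runq_lock);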
@@ -182,7 +182,7 @@ static void notify_spus_active(void)
 	 * Wake up the active spu_contexts.
 	 *
 	 * When the awakened processes see their "notify_active" flag is set,
-	 * they will call spu_switch_notify();
+	 * they will call spu_switch_notify().
 	 */
 	for_each_online_node(node) {
 		struct spu *spu;
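A hedged sketch of the wake-up the comment describes: for each SPU that is currently in use, set the context's notify flag and wake its controlling thread, which then calls spu_switch_notify() on its own behalf. Names such as SPU_SCHED_NOTIFY_ACTIVE, spu->alloc_state and ctx->stop_wq follow the usual spufs conventions but are reproduced from memory, not from this hunk.

/* Sketch: flag each running context and wake its controlling thread. */
list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
	if (spu->alloc_state != SPU_FREE) {
		struct spu_context *ctx = spu->ctx;

		set_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags);
		wake_up_all(&ctx->stop_wq);	/* woken thread calls spu_switch_notify() */
	}
}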
@@ -579,7 +579,7 @@ static struct spu *find_victim(struct spu_context *ctx)
 	/*
 	 * Look for a possible preemption candidate on the local node first.
 	 * If there is no candidate look at the other nodes. This isn't
-	 * exactly fair, but so far the whole spu schedule tries to keep
+	 * exactly fair, but so far the whole spu scheduler tries to keep
 	 * a strong node affinity. We might want to fine-tune this in
 	 * the future.
 	 */
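A hedged sketch of the node-local-first walk that comment refers to: start at the calling CPU's node and wrap around all NUMA nodes, so local SPUs are preferred but remote ones are still considered. cpu_to_node(), raw_smp_processor_id() and MAX_NUMNODES are standard kernel symbols; the loop shape mirrors the common spufs pattern rather than quoting find_victim() itself.

/* Sketch: iterate nodes starting with the local one, wrapping around. */
int node = cpu_to_node(raw_smp_processor_id());
int n;

for (n = 0; n < MAX_NUMNODES; n++, node++) {
	node = (node < MAX_NUMNODES) ? node : 0;	/* wrap back to node 0 */
	/* ... scan this node's SPUs for a lower-priority victim ... */
}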
@@ -905,7 +905,7 @@ static int show_spu_loadavg(struct seq_file *s, void *private)
 
 	/*
 	 * Note that last_pid doesn't really make much sense for the
-	 * SPU loadavg (it even seems very odd on the CPU side..),
+	 * SPU loadavg (it even seems very odd on the CPU side...),
 	 * but we include it here to have a 100% compatible interface.
 	 */
 	seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
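The three load averages fed into that seq_printf() are fixed-point values, printed as an integer part and a two-digit fraction to stay format-compatible with /proc/loadavg. A hedged sketch of the conversion, in the style of the kernel's usual LOAD_INT/LOAD_FRAC macros over FSHIFT-scaled counters; the spu_avenrun array is an assumption here, mirroring the CPU-side avenrun.

/* Sketch: fixed-point load average -> "X.YZ" integer/fraction pair. */
#define SPU_LOAD_INT(x)		((x) >> FSHIFT)
#define SPU_LOAD_FRAC(x)	SPU_LOAD_INT(((x) & (FIXED_1 - 1)) * 100)

unsigned long a = spu_avenrun[0] + (FIXED_1 / 200);	/* round to nearest 1/100 */

seq_printf(s, "%lu.%02lu ...", SPU_LOAD_INT(a), SPU_LOAD_FRAC(a));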