Diffstat (limited to 'arch/powerpc/platforms/cell/spufs/sched.c')
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c | 386 ++++++++++++++++++++---------
 1 file changed, 267 insertions(+), 119 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index bd6fe4b7a84b..2f25e68b4bac 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -44,17 +44,18 @@
 #include <asm/spu_priv1.h>
 #include "spufs.h"
 
-#define SPU_MIN_TIMESLICE	(100 * HZ / 1000)
+#define SPU_TIMESLICE	(HZ)
 
-#define SPU_BITMAP_SIZE (((MAX_PRIO+BITS_PER_LONG)/BITS_PER_LONG)+1)
 struct spu_prio_array {
-	unsigned long bitmap[SPU_BITMAP_SIZE];
-	wait_queue_head_t waitq[MAX_PRIO];
+	DECLARE_BITMAP(bitmap, MAX_PRIO);
+	struct list_head runq[MAX_PRIO];
+	spinlock_t runq_lock;
 	struct list_head active_list[MAX_NUMNODES];
 	struct mutex active_mutex[MAX_NUMNODES];
 };
 
 static struct spu_prio_array *spu_prio;
+static struct workqueue_struct *spu_sched_wq;
 
 static inline int node_allowed(int node)
 {
@@ -68,6 +69,64 @@ static inline int node_allowed(int node)
 	return 1;
 }
 
+void spu_start_tick(struct spu_context *ctx)
+{
+	if (ctx->policy == SCHED_RR)
+		queue_delayed_work(spu_sched_wq, &ctx->sched_work, SPU_TIMESLICE);
+}
+
+void spu_stop_tick(struct spu_context *ctx)
+{
+	if (ctx->policy == SCHED_RR)
+		cancel_delayed_work(&ctx->sched_work);
+}
+
+void spu_sched_tick(struct work_struct *work)
+{
+	struct spu_context *ctx =
+		container_of(work, struct spu_context, sched_work.work);
+	struct spu *spu;
+	int rearm = 1;
+
+	mutex_lock(&ctx->state_mutex);
+	spu = ctx->spu;
+	if (spu) {
+		int best = sched_find_first_bit(spu_prio->bitmap);
+		if (best <= ctx->prio) {
+			spu_deactivate(ctx);
+			rearm = 0;
+		}
+	}
+	mutex_unlock(&ctx->state_mutex);
+
+	if (rearm)
+		spu_start_tick(ctx);
+}
+
+/**
+ * spu_add_to_active_list - add spu to active list
+ * @spu: spu to add to the active list
+ */
+static void spu_add_to_active_list(struct spu *spu)
+{
+	mutex_lock(&spu_prio->active_mutex[spu->node]);
+	list_add_tail(&spu->list, &spu_prio->active_list[spu->node]);
+	mutex_unlock(&spu_prio->active_mutex[spu->node]);
+}
+
+/**
+ * spu_remove_from_active_list - remove spu from active list
+ * @spu: spu to remove from the active list
+ */
+static void spu_remove_from_active_list(struct spu *spu)
+{
+	int node = spu->node;
+
+	mutex_lock(&spu_prio->active_mutex[node]);
+	list_del_init(&spu->list);
+	mutex_unlock(&spu_prio->active_mutex[node]);
+}
+
 static inline void mm_needs_global_tlbie(struct mm_struct *mm)
 {
 	int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;
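
The spu_sched_tick() worker above re-arms the SCHED_RR timeslice only when no context of equal or higher priority is waiting on the runqueue bitmap; otherwise it deactivates the running context. A minimal user-space sketch of that decision (not part of the patch; find_best_prio() and MODEL_MAX_PRIO are illustrative stand-ins for sched_find_first_bit() and MAX_PRIO):

/*
 * Sketch only: models the "keep running vs. give up the spu" choice made in
 * spu_sched_tick().  Lower numeric priority means higher priority, and an
 * empty runqueue is reported as MODEL_MAX_PRIO, matching sched_find_first_bit().
 */
#include <stdio.h>

#define MODEL_MAX_PRIO 140

/* returns MODEL_MAX_PRIO when no context is waiting */
static int find_best_prio(const int *waiting, int n)
{
	for (int prio = 0; prio < MODEL_MAX_PRIO; prio++)
		for (int i = 0; i < n; i++)
			if (waiting[i] == prio)
				return prio;
	return MODEL_MAX_PRIO;
}

/* 1 = keep the spu and re-arm the tick, 0 = deactivate (yield the spu) */
static int tick_decision(int running_prio, const int *waiting, int n)
{
	return find_best_prio(waiting, n) > running_prio;
}

int main(void)
{
	int waiting[] = { 130, 120 };

	/* prio 100 keeps the spu: nothing of equal or higher priority waits */
	printf("%s\n", tick_decision(100, waiting, 2) ? "re-arm" : "deactivate");
	/* prio 125 is preempted by the waiting prio-120 context */
	printf("%s\n", tick_decision(125, waiting, 2) ? "re-arm" : "deactivate");
	return 0;
}
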
@@ -94,8 +153,12 @@ int spu_switch_event_unregister(struct notifier_block * n)
 	return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
 }
 
-
-static inline void bind_context(struct spu *spu, struct spu_context *ctx)
+/**
+ * spu_bind_context - bind spu context to physical spu
+ * @spu: physical spu to bind to
+ * @ctx: context to bind
+ */
+static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
 {
 	pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
 		 spu->number, spu->node);
@@ -104,7 +167,6 @@ static inline void bind_context(struct spu *spu, struct spu_context *ctx)
 	ctx->spu = spu;
 	ctx->ops = &spu_hw_ops;
 	spu->pid = current->pid;
-	spu->prio = current->prio;
 	spu->mm = ctx->owner;
 	mm_needs_global_tlbie(spu->mm);
 	spu->ibox_callback = spufs_ibox_callback;
@@ -118,12 +180,21 @@ static inline void bind_context(struct spu *spu, struct spu_context *ctx)
 	spu->timestamp = jiffies;
 	spu_cpu_affinity_set(spu, raw_smp_processor_id());
 	spu_switch_notify(spu, ctx);
+	spu_add_to_active_list(spu);
+	ctx->state = SPU_STATE_RUNNABLE;
 }
 
-static inline void unbind_context(struct spu *spu, struct spu_context *ctx)
+/**
+ * spu_unbind_context - unbind spu context from physical spu
+ * @spu: physical spu to unbind from
+ * @ctx: context to unbind
+ */
+static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
 {
 	pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
 		 spu->pid, spu->number, spu->node);
+
+	spu_remove_from_active_list(spu);
 	spu_switch_notify(spu, NULL);
 	spu_unmap_mappings(ctx);
 	spu_save(&ctx->csa, spu);
@@ -136,95 +207,98 @@ static inline void unbind_context(struct spu *spu, struct spu_context *ctx)
 	spu->dma_callback = NULL;
 	spu->mm = NULL;
 	spu->pid = 0;
-	spu->prio = MAX_PRIO;
 	ctx->ops = &spu_backing_ops;
 	ctx->spu = NULL;
 	spu->flags = 0;
 	spu->ctx = NULL;
 }
 
-static inline void spu_add_wq(wait_queue_head_t * wq, wait_queue_t * wait,
-			      int prio)
+/**
+ * spu_add_to_rq - add a context to the runqueue
+ * @ctx: context to add
+ */
+static void spu_add_to_rq(struct spu_context *ctx)
 {
-	prepare_to_wait_exclusive(wq, wait, TASK_INTERRUPTIBLE);
-	set_bit(prio, spu_prio->bitmap);
+	spin_lock(&spu_prio->runq_lock);
+	list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
+	set_bit(ctx->prio, spu_prio->bitmap);
+	spin_unlock(&spu_prio->runq_lock);
 }
 
-static inline void spu_del_wq(wait_queue_head_t * wq, wait_queue_t * wait,
-			      int prio)
+/**
+ * spu_del_from_rq - remove a context from the runqueue
+ * @ctx: context to remove
+ */
+static void spu_del_from_rq(struct spu_context *ctx)
 {
-	u64 flags;
-
-	__set_current_state(TASK_RUNNING);
-
-	spin_lock_irqsave(&wq->lock, flags);
+	spin_lock(&spu_prio->runq_lock);
+	list_del_init(&ctx->rq);
+	if (list_empty(&spu_prio->runq[ctx->prio]))
+		clear_bit(ctx->prio, spu_prio->bitmap);
+	spin_unlock(&spu_prio->runq_lock);
+}
 
-	remove_wait_queue_locked(wq, wait);
-	if (list_empty(&wq->task_list))
-		clear_bit(prio, spu_prio->bitmap);
+/**
+ * spu_grab_context - remove one context from the runqueue
+ * @prio: priority of the context to be removed
+ *
+ * This function removes one context from the runqueue for priority @prio.
+ * If there is more than one context with the given priority the first
+ * task on the runqueue will be taken.
+ *
+ * Returns the spu_context it just removed.
+ *
+ * Must be called with spu_prio->runq_lock held.
+ */
+static struct spu_context *spu_grab_context(int prio)
+{
+	struct list_head *rq = &spu_prio->runq[prio];
 
-	spin_unlock_irqrestore(&wq->lock, flags);
+	if (list_empty(rq))
+		return NULL;
+	return list_entry(rq->next, struct spu_context, rq);
 }
 
-static void spu_prio_wait(struct spu_context *ctx, u64 flags)
+static void spu_prio_wait(struct spu_context *ctx)
 {
-	int prio = current->prio;
-	wait_queue_head_t *wq = &spu_prio->waitq[prio];
 	DEFINE_WAIT(wait);
 
-	if (ctx->spu)
-		return;
-
-	spu_add_wq(wq, &wait, prio);
-
+	set_bit(SPU_SCHED_WAKE, &ctx->sched_flags);
+	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
 	if (!signal_pending(current)) {
-		up_write(&ctx->state_sema);
-		pr_debug("%s: pid=%d prio=%d\n", __FUNCTION__,
-			 current->pid, current->prio);
+		mutex_unlock(&ctx->state_mutex);
 		schedule();
-		down_write(&ctx->state_sema);
+		mutex_lock(&ctx->state_mutex);
 	}
-
-	spu_del_wq(wq, &wait, prio);
+	__set_current_state(TASK_RUNNING);
+	remove_wait_queue(&ctx->stop_wq, &wait);
+	clear_bit(SPU_SCHED_WAKE, &ctx->sched_flags);
 }
 
-static void spu_prio_wakeup(void)
+/**
+ * spu_reschedule - try to find a runnable context for a spu
+ * @spu: spu available
+ *
+ * This function is called whenever a spu becomes idle. It looks for the
+ * most suitable runnable spu context and schedules it for execution.
+ */
+static void spu_reschedule(struct spu *spu)
 {
-	int best = sched_find_first_bit(spu_prio->bitmap);
-	if (best < MAX_PRIO) {
-		wait_queue_head_t *wq = &spu_prio->waitq[best];
-		wake_up_interruptible_nr(wq, 1);
-	}
-}
+	int best;
 
-static int get_active_spu(struct spu *spu)
-{
-	int node = spu->node;
-	struct spu *tmp;
-	int rc = 0;
+	spu_free(spu);
 
-	mutex_lock(&spu_prio->active_mutex[node]);
-	list_for_each_entry(tmp, &spu_prio->active_list[node], list) {
-		if (tmp == spu) {
-			list_del_init(&spu->list);
-			rc = 1;
-			break;
-		}
+	spin_lock(&spu_prio->runq_lock);
+	best = sched_find_first_bit(spu_prio->bitmap);
+	if (best < MAX_PRIO) {
+		struct spu_context *ctx = spu_grab_context(best);
+		if (ctx && test_bit(SPU_SCHED_WAKE, &ctx->sched_flags))
+			wake_up(&ctx->stop_wq);
 	}
-	mutex_unlock(&spu_prio->active_mutex[node]);
-	return rc;
-}
-
-static void put_active_spu(struct spu *spu)
-{
-	int node = spu->node;
-
-	mutex_lock(&spu_prio->active_mutex[node]);
-	list_add_tail(&spu->list, &spu_prio->active_list[node]);
-	mutex_unlock(&spu_prio->active_mutex[node]);
+	spin_unlock(&spu_prio->runq_lock);
 }
 
-static struct spu *spu_get_idle(struct spu_context *ctx, u64 flags)
+static struct spu *spu_get_idle(struct spu_context *ctx)
 {
 	struct spu *spu = NULL;
 	int node = cpu_to_node(raw_smp_processor_id());
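
The runqueue introduced in this hunk pairs one list head per priority with a MAX_PRIO-wide bitmap, so spu_reschedule() can find the highest-priority waiter with a single find-first-bit. A standalone user-space model of that lookup (not part of the patch; MODEL_MAX_PRIO, struct model_ctx and the simplified list are illustrative stand-ins for the kernel data structures):

/*
 * Sketch only: one list per priority, one bit per non-empty list, and a
 * find-first-set-bit scan to pick the highest-priority waiter (lower value =
 * higher priority).  Unlike spu_grab_context(), which only peeks and lets the
 * woken context call spu_del_from_rq() itself, this model dequeues directly.
 */
#include <stdio.h>
#include <stddef.h>

#define MODEL_MAX_PRIO 140
#define BITS_PER_WORD  (8 * sizeof(unsigned long))
#define BITMAP_WORDS   ((MODEL_MAX_PRIO + BITS_PER_WORD - 1) / BITS_PER_WORD)

struct model_ctx {
	int prio;
	struct model_ctx *next;	/* simplified singly-linked runqueue */
};

static unsigned long bitmap[BITMAP_WORDS];
static struct model_ctx *runq[MODEL_MAX_PRIO];

static void add_to_rq(struct model_ctx *ctx)
{
	ctx->next = runq[ctx->prio];	/* LIFO for brevity; the patch uses list_add_tail (FIFO) */
	runq[ctx->prio] = ctx;
	bitmap[ctx->prio / BITS_PER_WORD] |= 1UL << (ctx->prio % BITS_PER_WORD);
}

static struct model_ctx *grab_highest(void)
{
	for (size_t w = 0; w < BITMAP_WORDS; w++) {
		if (!bitmap[w])
			continue;
		int prio = w * BITS_PER_WORD + __builtin_ctzl(bitmap[w]);
		struct model_ctx *ctx = runq[prio];

		runq[prio] = ctx->next;
		if (!runq[prio])	/* last entry for this priority: clear its bit */
			bitmap[w] &= ~(1UL << (prio % BITS_PER_WORD));
		return ctx;
	}
	return NULL;	/* empty runqueue, i.e. best == MAX_PRIO in the patch */
}

int main(void)
{
	struct model_ctx a = { .prio = 120 }, b = { .prio = 35 };

	add_to_rq(&a);
	add_to_rq(&b);
	printf("picked prio %d\n", grab_highest()->prio);	/* 35 first: lower value wins */
	printf("picked prio %d\n", grab_highest()->prio);	/* then 120 */
	return 0;
}
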
@@ -241,87 +315,154 @@ static struct spu *spu_get_idle(struct spu_context *ctx, u64 flags)
 	return spu;
 }
 
-static inline struct spu *spu_get(struct spu_context *ctx, u64 flags)
+/**
+ * find_victim - find a lower priority context to preempt
+ * @ctx: candidate context for running
+ *
+ * Returns the freed physical spu to run the new context on.
+ */
+static struct spu *find_victim(struct spu_context *ctx)
 {
-	/* Future: spu_get_idle() if possible,
-	 * otherwise try to preempt an active
-	 * context.
+	struct spu_context *victim = NULL;
+	struct spu *spu;
+	int node, n;
+
+	/*
+	 * Look for a possible preemption candidate on the local node first.
+	 * If there is no candidate look at the other nodes. This isn't
+	 * exactly fair, but so far the whole spu scheduler tries to keep
+	 * a strong node affinity. We might want to fine-tune this in
+	 * the future.
 	 */
-	return spu_get_idle(ctx, flags);
+ restart:
+	node = cpu_to_node(raw_smp_processor_id());
+	for (n = 0; n < MAX_NUMNODES; n++, node++) {
+		node = (node < MAX_NUMNODES) ? node : 0;
+		if (!node_allowed(node))
+			continue;
+
+		mutex_lock(&spu_prio->active_mutex[node]);
+		list_for_each_entry(spu, &spu_prio->active_list[node], list) {
+			struct spu_context *tmp = spu->ctx;
+
+			if (tmp->rt_priority < ctx->rt_priority &&
+			    (!victim || tmp->rt_priority < victim->rt_priority))
+				victim = spu->ctx;
+		}
+		mutex_unlock(&spu_prio->active_mutex[node]);
+
+		if (victim) {
+			/*
+			 * This nests ctx->state_mutex, but we always lock
+			 * higher priority contexts before lower priority
+			 * ones, so this is safe until we introduce
+			 * priority inheritance schemes.
+			 */
+			if (!mutex_trylock(&victim->state_mutex)) {
+				victim = NULL;
+				goto restart;
+			}
+
+			spu = victim->spu;
+			if (!spu) {
+				/*
+				 * This race can happen because we've dropped
+				 * the active list mutex. Not a problem, just
+				 * restart the search.
+				 */
+				mutex_unlock(&victim->state_mutex);
+				victim = NULL;
+				goto restart;
+			}
+			spu_unbind_context(spu, victim);
+			mutex_unlock(&victim->state_mutex);
+			return spu;
+		}
+	}
+
+	return NULL;
 }
 
-/* The three externally callable interfaces
- * for the scheduler begin here.
+/**
+ * spu_activate - find a free spu for a context and execute it
+ * @ctx: spu context to schedule
+ * @flags: flags (currently ignored)
  *
- * spu_activate - bind a context to SPU, waiting as needed.
- * spu_deactivate - unbind a context from its SPU.
- * spu_yield - yield an SPU if others are waiting.
+ * Tries to find a free spu to run @ctx. If no free spu is available
+ * add the context to the runqueue so it gets woken up once an spu
+ * is available.
  */
-
-int spu_activate(struct spu_context *ctx, u64 flags)
+int spu_activate(struct spu_context *ctx, unsigned long flags)
 {
-	struct spu *spu;
-	int ret = 0;
 
-	for (;;) {
-		if (ctx->spu)
+	if (ctx->spu)
+		return 0;
+
+	do {
+		struct spu *spu;
+
+		spu = spu_get_idle(ctx);
+		/*
+		 * If this is a realtime thread we try to get it running by
+		 * preempting a lower priority thread.
+		 */
+		if (!spu && ctx->rt_priority)
+			spu = find_victim(ctx);
+		if (spu) {
+			spu_bind_context(spu, ctx);
 			return 0;
-		spu = spu_get(ctx, flags);
-		if (spu != NULL) {
-			if (ctx->spu != NULL) {
-				spu_free(spu);
-				spu_prio_wakeup();
-				break;
-			}
-			bind_context(spu, ctx);
-			put_active_spu(spu);
-			break;
 		}
-		spu_prio_wait(ctx, flags);
-		if (signal_pending(current)) {
-			ret = -ERESTARTSYS;
-			spu_prio_wakeup();
-			break;
-		}
-	}
-	return ret;
+
+		spu_add_to_rq(ctx);
+		if (!(flags & SPU_ACTIVATE_NOWAKE))
+			spu_prio_wait(ctx);
+		spu_del_from_rq(ctx);
+	} while (!signal_pending(current));
+
+	return -ERESTARTSYS;
 }
 
+/**
+ * spu_deactivate - unbind a context from its physical spu
+ * @ctx: spu context to unbind
+ *
+ * Unbind @ctx from the physical spu it is running on and schedule
+ * the highest priority context to run on the freed physical spu.
+ */
 void spu_deactivate(struct spu_context *ctx)
 {
-	struct spu *spu;
-	int needs_idle;
+	struct spu *spu = ctx->spu;
 
-	spu = ctx->spu;
-	if (!spu)
-		return;
-	needs_idle = get_active_spu(spu);
-	unbind_context(spu, ctx);
-	if (needs_idle) {
-		spu_free(spu);
-		spu_prio_wakeup();
+	if (spu) {
+		spu_unbind_context(spu, ctx);
+		spu_reschedule(spu);
 	}
 }
 
+/**
+ * spu_yield - yield a physical spu if others are waiting
+ * @ctx: spu context to yield
+ *
+ * Check if there is a higher priority context waiting and if yes
+ * unbind @ctx from the physical spu and schedule the highest
+ * priority context to run on the freed physical spu instead.
+ */
 void spu_yield(struct spu_context *ctx)
 {
 	struct spu *spu;
 	int need_yield = 0;
 
-	if (down_write_trylock(&ctx->state_sema)) {
+	if (mutex_trylock(&ctx->state_mutex)) {
 		if ((spu = ctx->spu) != NULL) {
 			int best = sched_find_first_bit(spu_prio->bitmap);
 			if (best < MAX_PRIO) {
 				pr_debug("%s: yielding SPU %d NODE %d\n",
 					 __FUNCTION__, spu->number, spu->node);
 				spu_deactivate(ctx);
-				ctx->state = SPU_STATE_SAVED;
 				need_yield = 1;
-			} else {
-				spu->prio = MAX_PRIO;
 			}
 		}
-		up_write(&ctx->state_sema);
+		mutex_unlock(&ctx->state_mutex);
 	}
 	if (unlikely(need_yield))
 		yield();
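
find_victim() above starts its search at the node local to the calling cpu and wraps around so that every node is visited exactly once, preserving the node affinity its comment describes. A small user-space sketch of that iteration pattern (not part of the patch; MODEL_MAX_NUMNODES and local_node() are assumed stand-ins for MAX_NUMNODES and cpu_to_node(raw_smp_processor_id())):

/*
 * Sketch only: the "start local, wrap around" node walk used by find_victim().
 */
#include <stdio.h>

#define MODEL_MAX_NUMNODES 4

static int local_node(void)
{
	return 2;	/* pretend the calling cpu sits on node 2 */
}

int main(void)
{
	int node = local_node();

	for (int n = 0; n < MODEL_MAX_NUMNODES; n++, node++) {
		node = (node < MODEL_MAX_NUMNODES) ? node : 0;
		printf("visiting node %d\n", node);	/* prints 2, 3, 0, 1 */
	}
	return 0;
}
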
@@ -331,14 +472,19 @@ int __init spu_sched_init(void)
 {
 	int i;
 
+	spu_sched_wq = create_singlethread_workqueue("spusched");
+	if (!spu_sched_wq)
+		return 1;
+
 	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
 	if (!spu_prio) {
 		printk(KERN_WARNING "%s: Unable to allocate priority queue.\n",
 		       __FUNCTION__);
+		destroy_workqueue(spu_sched_wq);
 		return 1;
 	}
 	for (i = 0; i < MAX_PRIO; i++) {
-		init_waitqueue_head(&spu_prio->waitq[i]);
+		INIT_LIST_HEAD(&spu_prio->runq[i]);
 		__clear_bit(i, spu_prio->bitmap);
 	}
 	__set_bit(MAX_PRIO, spu_prio->bitmap);
@@ -346,6 +492,7 @@ int __init spu_sched_init(void)
 		mutex_init(&spu_prio->active_mutex[i]);
 		INIT_LIST_HEAD(&spu_prio->active_list[i]);
 	}
+	spin_lock_init(&spu_prio->runq_lock);
 	return 0;
 }
 
@@ -364,4 +511,5 @@ void __exit spu_sched_exit(void)
 		mutex_unlock(&spu_prio->active_mutex[node]);
 	}
 	kfree(spu_prio);
+	destroy_workqueue(spu_sched_wq);
 }