Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/platforms/cell/spufs/context.c |   2
-rw-r--r--  arch/powerpc/platforms/cell/spufs/run.c     |   6
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c   | 142
-rw-r--r--  arch/powerpc/platforms/cell/spufs/spufs.h   |  12
4 files changed, 86 insertions, 76 deletions
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 7c51cb54bca1..f084667e4f50 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -56,7 +56,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
         ctx->rt_priority = current->rt_priority;
         ctx->policy = current->policy;
         ctx->prio = current->prio;
-        INIT_DELAYED_WORK(&ctx->sched_work, spu_sched_tick);
+        ctx->time_slice = SPU_DEF_TIMESLICE;
         goto out;
 out_free:
         kfree(ctx);
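
With the per-context delayed work item gone, a context only carries a countdown that the central scheduler tick (added in sched.c below) decrements. A minimal standalone sketch of that countdown idiom, with hypothetical names rather than code from the patch:

struct ctx {
        unsigned int time_slice;        /* ticks left in the current slice */
};

#define DEF_TIMESLICE   100

/* Returns 1 when the slice has expired and the owner should be preempted. */
static int tick(struct ctx *c)
{
        if (--c->time_slice)
                return 0;               /* slice still has ticks left */
        c->time_slice = DEF_TIMESLICE;  /* refill for the next round */
        return 1;
}

Note the predecrement: a fresh slice of 100 survives 99 ticks and expires on the 100th, which is exactly how spusched_tick() counts in sched.c below.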
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index 3ba30cea764a..89b02b6bfc55 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -144,7 +144,6 @@ static int spu_run_init(struct spu_context *ctx, u32 * npc)
                 ctx->ops->runcntl_write(ctx, runcntl);
         } else {
                 unsigned long mode = SPU_PRIVCNTL_MODE_NORMAL;
-                spu_start_tick(ctx);
                 ctx->ops->npc_write(ctx, *npc);
                 if (test_thread_flag(TIF_SINGLESTEP))
                         mode = SPU_PRIVCNTL_MODE_SINGLE_STEP;
@@ -160,7 +159,6 @@ static int spu_run_fini(struct spu_context *ctx, u32 * npc,
 {
         int ret = 0;
 
-        spu_stop_tick(ctx);
         *status = ctx->ops->status_read(ctx);
         *npc = ctx->ops->npc_read(ctx);
         spu_release(ctx);
@@ -330,10 +328,8 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
 
         if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
                 ret = spu_reacquire_runnable(ctx, npc, &status);
-                if (ret) {
-                        spu_stop_tick(ctx);
+                if (ret)
                         goto out2;
-                }
                 continue;
         }
         ret = spu_process_events(ctx);
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 3b831e07f1ed..d673353b6d33 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -35,6 +35,7 @@
 #include <linux/numa.h>
 #include <linux/mutex.h>
 #include <linux/notifier.h>
+#include <linux/kthread.h>
 
 #include <asm/io.h>
 #include <asm/mmu_context.h>
@@ -45,6 +46,8 @@
 
 #define SPU_TIMESLICE        (HZ)
 
+#define SPUSCHED_TICK        (HZ / 100)
+
 struct spu_prio_array {
         DECLARE_BITMAP(bitmap, MAX_PRIO);
         struct list_head runq[MAX_PRIO];
@@ -54,7 +57,8 @@ struct spu_prio_array {
 };
 
 static struct spu_prio_array *spu_prio;
-static struct workqueue_struct *spu_sched_wq;
+static struct task_struct *spusched_task;
+static struct timer_list spusched_timer;
 
 static inline int node_allowed(int node)
 {
@@ -68,31 +72,6 @@ static inline int node_allowed(int node)
         return 1;
 }
 
-void spu_start_tick(struct spu_context *ctx)
-{
-        if (ctx->policy == SCHED_RR) {
-                /*
-                 * Make sure the exiting bit is cleared.
-                 */
-                clear_bit(SPU_SCHED_EXITING, &ctx->sched_flags);
-                mb();
-                queue_delayed_work(spu_sched_wq, &ctx->sched_work, SPU_TIMESLICE);
-        }
-}
-
-void spu_stop_tick(struct spu_context *ctx)
-{
-        if (ctx->policy == SCHED_RR) {
-                /*
-                 * While the work can be rearming normally setting this flag
-                 * makes sure it does not rearm itself anymore.
-                 */
-                set_bit(SPU_SCHED_EXITING, &ctx->sched_flags);
-                mb();
-                cancel_delayed_work(&ctx->sched_work);
-        }
-}
-
 /**
  * spu_add_to_active_list - add spu to active list
  * @spu: spu to add to the active list
@@ -104,6 +83,11 @@ static void spu_add_to_active_list(struct spu *spu)
         mutex_unlock(&spu_prio->active_mutex[spu->node]);
 }
 
+static void __spu_remove_from_active_list(struct spu *spu)
+{
+        list_del_init(&spu->list);
+}
+
 /**
  * spu_remove_from_active_list - remove spu from active list
  * @spu: spu to remove from the active list
@@ -113,7 +97,7 @@ static void spu_remove_from_active_list(struct spu *spu)
         int node = spu->node;
 
         mutex_lock(&spu_prio->active_mutex[node]);
-        list_del_init(&spu->list);
+        __spu_remove_from_active_list(spu);
         mutex_unlock(&spu_prio->active_mutex[node]);
 }
 
@@ -161,7 +145,6 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
         spu->timestamp = jiffies;
         spu_cpu_affinity_set(spu, raw_smp_processor_id());
         spu_switch_notify(spu, ctx);
-        spu_add_to_active_list(spu);
         ctx->state = SPU_STATE_RUNNABLE;
 }
 
@@ -175,7 +158,6 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
         pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
                  spu->pid, spu->number, spu->node);
 
-        spu_remove_from_active_list(spu);
         spu_switch_notify(spu, NULL);
         spu_unmap_mappings(ctx);
         spu_save(&ctx->csa, spu);
@@ -312,6 +294,7 @@ static struct spu *find_victim(struct spu_context *ctx)
                         victim = NULL;
                         goto restart;
                 }
+                spu_remove_from_active_list(spu);
                 spu_unbind_context(spu, victim);
                 mutex_unlock(&victim->state_mutex);
                 /*
@@ -354,6 +337,7 @@ int spu_activate(struct spu_context *ctx, unsigned long flags)
                 spu = find_victim(ctx);
                 if (spu) {
                         spu_bind_context(spu, ctx);
+                        spu_add_to_active_list(spu);
                         return 0;
                 }
 
@@ -397,6 +381,7 @@ static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
         if (spu) {
                 new = grab_runnable_context(max_prio);
                 if (new || force) {
+                        spu_remove_from_active_list(spu);
                         spu_unbind_context(spu, ctx);
                         spu_free(spu);
                         if (new)
@@ -437,51 +422,78 @@ void spu_yield(struct spu_context *ctx)
         }
 }
 
-void spu_sched_tick(struct work_struct *work)
+static void spusched_tick(struct spu_context *ctx)
 {
-        struct spu_context *ctx =
-                container_of(work, struct spu_context, sched_work.work);
-        int preempted;
+        if (ctx->policy != SCHED_RR || --ctx->time_slice)
+                return;
 
         /*
-         * If this context is being stopped avoid rescheduling from the
-         * scheduler tick because we would block on the state_mutex.
-         * The caller will yield the spu later on anyway.
+         * Unfortunately active_mutex ranks outside of state_mutex, so
+         * we have to trylock here. If we fail give the context another
+         * tick and try again.
          */
-        if (test_bit(SPU_SCHED_EXITING, &ctx->sched_flags))
-                return;
-
-        mutex_lock(&ctx->state_mutex);
-        preempted = __spu_deactivate(ctx, 0, ctx->prio + 1);
-        mutex_unlock(&ctx->state_mutex);
-
-        if (preempted) {
-                /*
-                 * We need to break out of the wait loop in spu_run manually
-                 * to ensure this context gets put on the runqueue again
-                 * ASAP.
-                 */
-                wake_up(&ctx->stop_wq);
+        if (mutex_trylock(&ctx->state_mutex)) {
+                struct spu_context *new = grab_runnable_context(ctx->prio + 1);
+                if (new) {
+                        struct spu *spu = ctx->spu;
 
+                        __spu_remove_from_active_list(spu);
+                        spu_unbind_context(spu, ctx);
+                        spu_free(spu);
+                        wake_up(&new->stop_wq);
+                        /*
+                         * We need to break out of the wait loop in
+                         * spu_run manually to ensure this context
+                         * gets put on the runqueue again ASAP.
+                         */
+                        wake_up(&ctx->stop_wq);
+                }
+                ctx->time_slice = SPU_DEF_TIMESLICE;
+                mutex_unlock(&ctx->state_mutex);
         } else {
-                spu_start_tick(ctx);
+                ctx->time_slice++;
         }
 }
 
+static void spusched_wake(unsigned long data)
+{
+        mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
+        wake_up_process(spusched_task);
+}
+
+static int spusched_thread(void *unused)
+{
+        struct spu *spu, *next;
+        int node;
+
+        setup_timer(&spusched_timer, spusched_wake, 0);
+        __mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
+
+        while (!kthread_should_stop()) {
+                set_current_state(TASK_INTERRUPTIBLE);
+                schedule();
+                for (node = 0; node < MAX_NUMNODES; node++) {
+                        mutex_lock(&spu_prio->active_mutex[node]);
+                        list_for_each_entry_safe(spu, next,
+                                                 &spu_prio->active_list[node],
+                                                 list)
+                                spusched_tick(spu->ctx);
+                        mutex_unlock(&spu_prio->active_mutex[node]);
+                }
+        }
+
+        del_timer_sync(&spusched_timer);
+        return 0;
+}
+
 int __init spu_sched_init(void)
 {
         int i;
 
-        spu_sched_wq = create_singlethread_workqueue("spusched");
-        if (!spu_sched_wq)
-                return 1;
-
         spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
-        if (!spu_prio) {
-                printk(KERN_WARNING "%s: Unable to allocate priority queue.\n",
-                       __FUNCTION__);
-                destroy_workqueue(spu_sched_wq);
-                return 1;
-        }
+        if (!spu_prio)
+                return -ENOMEM;
+
         for (i = 0; i < MAX_PRIO; i++) {
                 INIT_LIST_HEAD(&spu_prio->runq[i]);
                 __clear_bit(i, spu_prio->bitmap);
@@ -492,7 +504,14 @@ int __init spu_sched_init(void)
                 INIT_LIST_HEAD(&spu_prio->active_list[i]);
         }
         spin_lock_init(&spu_prio->runq_lock);
+
+        spusched_task = kthread_run(spusched_thread, NULL, "spusched");
+        if (IS_ERR(spusched_task)) {
+                kfree(spu_prio);
+                return PTR_ERR(spusched_task);
+        }
         return 0;
+
 }
 
 void __exit spu_sched_exit(void)
@@ -500,6 +519,8 @@ void __exit spu_sched_exit(void)
         struct spu *spu, *tmp;
         int node;
 
+        kthread_stop(spusched_task);
+
         for (node = 0; node < MAX_NUMNODES; node++) {
                 mutex_lock(&spu_prio->active_mutex[node]);
                 list_for_each_entry_safe(spu, tmp, &spu_prio->active_list[node],
@@ -510,5 +531,4 @@ void __exit spu_sched_exit(void)
                 mutex_unlock(&spu_prio->active_mutex[node]);
         }
         kfree(spu_prio);
-        destroy_workqueue(spu_sched_wq);
 }
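
The machinery above is a classic kernel idiom: a self-rearming timer whose handler does nothing but rearm itself and call wake_up_process(), paired with a kthread that sleeps in TASK_INTERRUPTIBLE and does the real work in process context, where taking mutexes is allowed (a timer handler runs in softirq context and must not sleep). A condensed, hedged sketch against the 2007-era timer API the patch itself uses, with hypothetical names:

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/timer.h>

/* 'worker' is assigned by kthread_run() in init code, not shown. */
static struct task_struct *worker;
static struct timer_list tick_timer;

static void tick_wake(unsigned long data)
{
        /* Softirq context: rearm first, then kick the thread. */
        mod_timer(&tick_timer, jiffies + HZ / 100);
        wake_up_process(worker);
}

static int worker_thread(void *unused)
{
        setup_timer(&tick_timer, tick_wake, 0);
        mod_timer(&tick_timer, jiffies + HZ / 100);

        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
                schedule();
                /* periodic work here; free to sleep and take mutexes */
        }

        del_timer_sync(&tick_timer);
        return 0;
}

Setting TASK_INTERRUPTIBLE before schedule() closes the race with a wakeup arriving in between: wake_up_process() flips the thread back to TASK_RUNNING, so schedule() returns promptly instead of sleeping through a tick. kthread_stop() also wakes the thread, so the loop above terminates without any extra signalling.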
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 47617e8014a5..8068171dfa9c 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -31,6 +31,8 @@
 #include <asm/spu_csa.h>
 #include <asm/spu_info.h>
 
+#define SPU_DEF_TIMESLICE        100
+
 /* The magic number for our file system */
 enum {
         SPUFS_MAGIC = 0x23c9b64e,
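
For scale, a back-of-the-envelope that depends on the kernel's HZ configuration:

/* With HZ = 100 or HZ = 1000, SPUSCHED_TICK = HZ / 100 works out to
 * 10 ms per tick, so a fresh slice of SPU_DEF_TIMESLICE = 100 ticks
 * lasts about one second, the same budget as the old per-context
 * SPU_TIMESLICE of HZ jiffies, now spent in 100 small steps.  (With
 * HZ = 250 the integer division gives 2-jiffy ticks, i.e. 8 ms and
 * a slice of roughly 0.8 s.)
 */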
@@ -39,11 +41,6 @@ enum {
 struct spu_context_ops;
 struct spu_gang;
 
-/* ctx->sched_flags */
-enum {
-        SPU_SCHED_EXITING = 0,
-};
-
 struct spu_context {
         struct spu *spu; /* pointer to a physical SPU */
         struct spu_state csa; /* SPU context save area. */
@@ -83,7 +80,7 @@ struct spu_context {
 
         /* scheduler fields */
         struct list_head rq;
-        struct delayed_work sched_work;
+        unsigned int time_slice;
         unsigned long sched_flags;
         unsigned long rt_priority;
         int policy;
@@ -200,9 +197,6 @@ void spu_acquire_saved(struct spu_context *ctx);
 int spu_activate(struct spu_context *ctx, unsigned long flags);
 void spu_deactivate(struct spu_context *ctx);
 void spu_yield(struct spu_context *ctx);
-void spu_start_tick(struct spu_context *ctx);
-void spu_stop_tick(struct spu_context *ctx);
-void spu_sched_tick(struct work_struct *work);
 int __init spu_sched_init(void);
 void __exit spu_sched_exit(void);
 
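
One detail of the new spusched_tick() worth unpacking: the tick runs with active_mutex held, while the usual paths take state_mutex first and active_mutex second. Blocking on state_mutex from inside the tick could therefore deadlock, so the tick uses mutex_trylock() and, on contention, simply grants the context one extra tick. A generic sketch of that trylock back-off pattern, with hypothetical names:

#include <linux/mutex.h>

static DEFINE_MUTEX(outer);     /* think active_mutex */
static DEFINE_MUTEX(inner);     /* think state_mutex, taken first elsewhere */

/* Called with 'outer' held; must not block on 'inner'. */
static int try_tick_work(void)
{
        if (!mutex_trylock(&inner))
                return 0;       /* contended: back off, retry on the next tick */
        /* ... work that needs both locks ... */
        mutex_unlock(&inner);
        return 1;
}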