author     Arnd Bergmann <arnd@arndb.de>      2005-12-05 22:52:25 -0500
committer  Paul Mackerras <paulus@samba.org>  2006-01-08 22:52:55 -0500
commit     5110459f181ef1f11200bb3dec61953f08cc49e7 (patch)
tree       73356ce50b3fb5055b4a6f39f237f046615f797d /arch/powerpc/platforms
parent     3b3d22cb84a0bb12f6bbb2b1158972894bec3f21 (diff)
[PATCH] spufs: Improved SPU preemptability.
This patch makes it easier to preempt an SPU context by having the
scheduler hold ctx->state_sema for much shorter periods of time.

As part of this restructuring, the control logic for the "run"
operation is moved from arch/ppc64/kernel/spu_base.c to fs/spufs/file.c.
Of course the base retains "bottom half" handlers for class{0,1} irqs.
The new run loop will re-acquire an SPU if preempted.

From: Mark Nutter <mnutter@us.ibm.com>
Signed-off-by: Arnd Bergmann <arndb@de.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
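In outline, the restructured run loop works as follows. This is a
simplified sketch of spufs_run_spu() as added by this patch (see the
file.c hunk below), with explanatory comments added; all helper names
are the ones the patch introduces:

long spufs_run_spu(struct file *file, struct spu_context *ctx,
                   u32 *npc, u32 *status)
{
        int ret;

        /* take ctx->state_sema briefly, bind an SPU if necessary,
         * write the program counter and start the SPU running */
        if ((ret = spu_run_init(ctx, npc, status)) != 0)
                return ret;

        do {
                /* sleep on ctx->stop_wq until the interrupt-level
                 * stop callback wakes us; the context may be
                 * preempted by the scheduler while we sleep */
                ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, status));
                if (unlikely(ret))
                        break;

                /* preempted: drop the saved state and re-acquire
                 * an SPU before continuing */
                if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
                        ret = spu_reacquire_runnable(ctx, npc, status);
                        if (ret)
                                return ret;
                        continue;
                }

                /* handle page faults and class 0 events in task
                 * context via the retained bottom-half handlers */
                ret = spu_process_events(ctx);
        } while (!ret && !(*status & (SPU_STATUS_STOPPED_BY_STOP |
                                      SPU_STATUS_STOPPED_BY_HALT)));

        ctx->ops->runcntl_stop(ctx);
        ret = spu_run_fini(ctx, npc, status);
        if (!ret)
                ret = *status;
        spu_yield(ctx);
        return ret;
}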
Diffstat (limited to 'arch/powerpc/platforms')
-rw-r--r--  arch/powerpc/platforms/cell/spu_base.c           93
-rw-r--r--  arch/powerpc/platforms/cell/spufs/backing_ops.c  19
-rw-r--r--  arch/powerpc/platforms/cell/spufs/context.c       5
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c         117
-rw-r--r--  arch/powerpc/platforms/cell/spufs/hw_ops.c        17
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c         67
-rw-r--r--  arch/powerpc/platforms/cell/spufs/spufs.h          5
7 files changed, 202 insertions(+), 121 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 167580ce8691..8abd4bd19665 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -130,7 +130,8 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
 	spu->dar = ea;
 	spu->dsisr = dsisr;
 	mb();
-	wake_up(&spu->stop_wq);
+	if (spu->stop_callback)
+		spu->stop_callback(spu);
 	return 0;
 }
 
@@ -151,7 +152,8 @@ static int __spu_trap_stop(struct spu *spu)
 {
 	pr_debug("%s\n", __FUNCTION__);
 	spu->stop_code = in_be32(&spu->problem->spu_status_R);
-	wake_up(&spu->stop_wq);
+	if (spu->stop_callback)
+		spu->stop_callback(spu);
 	return 0;
 }
 
@@ -159,7 +161,8 @@ static int __spu_trap_halt(struct spu *spu)
 {
 	pr_debug("%s\n", __FUNCTION__);
 	spu->stop_code = in_be32(&spu->problem->spu_status_R);
-	wake_up(&spu->stop_wq);
+	if (spu->stop_callback)
+		spu->stop_callback(spu);
 	return 0;
 }
 
@@ -190,12 +193,13 @@ spu_irq_class_0(int irq, void *data, struct pt_regs *regs)
 
 	spu = data;
 	spu->class_0_pending = 1;
-	wake_up(&spu->stop_wq);
+	if (spu->stop_callback)
+		spu->stop_callback(spu);
 
 	return IRQ_HANDLED;
 }
 
-static int
+int
 spu_irq_class_0_bottom(struct spu *spu)
 {
 	unsigned long stat;
@@ -214,8 +218,10 @@ spu_irq_class_0_bottom(struct spu *spu)
 		__spu_trap_error(spu);
 
 	out_be64(&spu->priv1->int_stat_class0_RW, stat);
-	return 0;
+
+	return (stat & 0x7) ? -EIO : 0;
 }
+EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);
 
 static irqreturn_t
 spu_irq_class_1(int irq, void *data, struct pt_regs *regs)
@@ -250,6 +256,7 @@ spu_irq_class_1(int irq, void *data, struct pt_regs *regs)
 
 	return stat ? IRQ_HANDLED : IRQ_NONE;
 }
+EXPORT_SYMBOL_GPL(spu_irq_class_1_bottom);
 
 static irqreturn_t
 spu_irq_class_2(int irq, void *data, struct pt_regs *regs)
@@ -478,7 +485,7 @@ bad_area:
 	return -EFAULT;
 }
 
-static int spu_handle_pte_fault(struct spu *spu)
+int spu_irq_class_1_bottom(struct spu *spu)
 {
 	u64 ea, dsisr, access, error = 0UL;
 	int ret = 0;
@@ -508,76 +515,6 @@ static int spu_handle_pte_fault(struct spu *spu)
 	return ret;
 }
 
-static inline int spu_pending(struct spu *spu, u32 * stat)
-{
-	struct spu_problem __iomem *prob = spu->problem;
-	u64 pte_fault;
-
-	*stat = in_be32(&prob->spu_status_R);
-	pte_fault = spu->dsisr &
-	    (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED);
-	return (!(*stat & 0x1) || pte_fault || spu->class_0_pending) ? 1 : 0;
-}
-
-int spu_run(struct spu *spu)
-{
-	struct spu_problem __iomem *prob;
-	struct spu_priv1 __iomem *priv1;
-	struct spu_priv2 __iomem *priv2;
-	u32 status;
-	int ret;
-
-	prob = spu->problem;
-	priv1 = spu->priv1;
-	priv2 = spu->priv2;
-
-	/* Let SPU run. */
-	eieio();
-	out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
-
-	do {
-		ret = wait_event_interruptible(spu->stop_wq,
-					       spu_pending(spu, &status));
-
-		if (spu->dsisr &
-		    (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
-			ret = spu_handle_pte_fault(spu);
-
-		if (spu->class_0_pending)
-			spu_irq_class_0_bottom(spu);
-
-		if (!ret && signal_pending(current))
-			ret = -ERESTARTSYS;
-
-	} while (!ret && !(status &
-			   (SPU_STATUS_STOPPED_BY_STOP |
-			    SPU_STATUS_STOPPED_BY_HALT)));
-
-	/* Ensure SPU is stopped. */
-	out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
-	eieio();
-	while (in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)
-		cpu_relax();
-
-	out_be64(&priv2->slb_invalidate_all_W, 0);
-	out_be64(&priv1->tlb_invalidate_entry_W, 0UL);
-	eieio();
-
-	/* Check for SPU breakpoint. */
-	if (unlikely(current->ptrace & PT_PTRACED)) {
-		status = in_be32(&prob->spu_status_R);
-
-		if ((status & SPU_STATUS_STOPPED_BY_STOP)
-		    && status >> SPU_STOP_STATUS_SHIFT == 0x3fff) {
-			force_sig(SIGTRAP, current);
-			ret = -ERESTARTSYS;
-		}
-	}
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(spu_run);
-
 static void __iomem * __init map_spe_prop(struct device_node *n,
 						const char *name)
 {
@@ -693,9 +630,9 @@ static int __init create_spu(struct device_node *spe)
 	out_be64(&spu->priv1->mfc_sdr_RW, mfspr(SPRN_SDR1));
 	out_be64(&spu->priv1->mfc_sr1_RW, 0x33);
 
-	init_waitqueue_head(&spu->stop_wq);
 	spu->ibox_callback = NULL;
 	spu->wbox_callback = NULL;
+	spu->stop_callback = NULL;
 
 	down(&spu_mutex);
 	spu->number = number++;
diff --git a/arch/powerpc/platforms/cell/spufs/backing_ops.c b/arch/powerpc/platforms/cell/spufs/backing_ops.c
index caf0984064e2..66567c109965 100644
--- a/arch/powerpc/platforms/cell/spufs/backing_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/backing_ops.c
@@ -232,6 +232,23 @@ static char *spu_backing_get_ls(struct spu_context *ctx)
 	return ctx->csa.lscsa->ls;
 }
 
+static void spu_backing_runcntl_write(struct spu_context *ctx, u32 val)
+{
+	spin_lock(&ctx->csa.register_lock);
+	ctx->csa.prob.spu_runcntl_RW = val;
+	if (val & SPU_RUNCNTL_RUNNABLE) {
+		ctx->csa.prob.spu_status_R |= SPU_STATUS_RUNNING;
+	} else {
+		ctx->csa.prob.spu_status_R &= ~SPU_STATUS_RUNNING;
+	}
+	spin_unlock(&ctx->csa.register_lock);
+}
+
+static void spu_backing_runcntl_stop(struct spu_context *ctx)
+{
+	spu_backing_runcntl_write(ctx, SPU_RUNCNTL_STOP);
+}
+
 struct spu_context_ops spu_backing_ops = {
 	.mbox_read = spu_backing_mbox_read,
 	.mbox_stat_read = spu_backing_mbox_stat_read,
@@ -249,4 +266,6 @@ struct spu_context_ops spu_backing_ops = {
 	.npc_write = spu_backing_npc_write,
 	.status_read = spu_backing_status_read,
 	.get_ls = spu_backing_get_ls,
+	.runcntl_write = spu_backing_runcntl_write,
+	.runcntl_stop = spu_backing_runcntl_stop,
 };
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 5d6195fc107d..0d88a1c24f67 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -45,6 +45,7 @@ struct spu_context *alloc_spu_context(struct address_space *local_store)
 	init_rwsem(&ctx->state_sema);
 	init_waitqueue_head(&ctx->ibox_wq);
 	init_waitqueue_head(&ctx->wbox_wq);
+	init_waitqueue_head(&ctx->stop_wq);
 	ctx->ibox_fasync = NULL;
 	ctx->wbox_fasync = NULL;
 	ctx->state = SPU_STATE_SAVED;
@@ -105,7 +106,7 @@ void spu_release(struct spu_context *ctx)
 	up_read(&ctx->state_sema);
 }
 
-static void spu_unmap_mappings(struct spu_context *ctx)
+void spu_unmap_mappings(struct spu_context *ctx)
 {
 	unmap_mapping_range(ctx->local_store, 0, LS_SIZE, 1);
 }
@@ -126,7 +127,6 @@ int spu_acquire_runnable(struct spu_context *ctx)
 
 	down_write(&ctx->state_sema);
 	if (ctx->state == SPU_STATE_SAVED) {
-		spu_unmap_mappings(ctx);
 		ret = spu_activate(ctx, 0);
 		ctx->state = SPU_STATE_RUNNABLE;
 	}
@@ -154,7 +154,6 @@ void spu_acquire_saved(struct spu_context *ctx)
 	down_write(&ctx->state_sema);
 
 	if (ctx->state == SPU_STATE_RUNNABLE) {
-		spu_unmap_mappings(ctx);
 		spu_deactivate(ctx);
 		ctx->state = SPU_STATE_SAVED;
 	}
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 0fe1feccc02d..af5adc372224 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -25,6 +25,7 @@
 #include <linux/module.h>
 #include <linux/pagemap.h>
 #include <linux/poll.h>
+#include <linux/ptrace.h>
 
 #include <asm/io.h>
 #include <asm/semaphore.h>
@@ -540,26 +541,122 @@ static struct file_operations spufs_wbox_stat_fops = {
 	.read = spufs_wbox_stat_read,
 };
 
-long spufs_run_spu(struct file *file, struct spu_context *ctx,
-		   u32 *npc, u32 *status)
+/* interrupt-level stop callback function. */
+void spufs_stop_callback(struct spu *spu)
+{
+	struct spu_context *ctx = spu->ctx;
+
+	wake_up_all(&ctx->stop_wq);
+}
+
+static inline int spu_stopped(struct spu_context *ctx, u32 * stat)
+{
+	struct spu *spu;
+	u64 pte_fault;
+
+	*stat = ctx->ops->status_read(ctx);
+	if (ctx->state != SPU_STATE_RUNNABLE)
+		return 1;
+	spu = ctx->spu;
+	pte_fault = spu->dsisr &
+	    (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED);
+	return (!(*stat & 0x1) || pte_fault || spu->class_0_pending) ? 1 : 0;
+}
+
+static inline int spu_run_init(struct spu_context *ctx, u32 * npc,
+			       u32 * status)
 {
 	int ret;
 
-	ret = spu_acquire_runnable(ctx);
-	if (ret)
+	if ((ret = spu_acquire_runnable(ctx)) != 0)
 		return ret;
-
 	ctx->ops->npc_write(ctx, *npc);
+	ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
+	return 0;
+}
 
-	ret = spu_run(ctx->spu);
-
-	if (!ret)
-		ret = ctx->ops->status_read(ctx);
+static inline int spu_run_fini(struct spu_context *ctx, u32 * npc,
+			       u32 * status)
+{
+	int ret = 0;
 
+	*status = ctx->ops->status_read(ctx);
 	*npc = ctx->ops->npc_read(ctx);
-
 	spu_release(ctx);
+
+	if (signal_pending(current))
+		ret = -ERESTARTSYS;
+	if (unlikely(current->ptrace & PT_PTRACED)) {
+		if ((*status & SPU_STATUS_STOPPED_BY_STOP)
+		    && (*status >> SPU_STOP_STATUS_SHIFT) == 0x3fff) {
+			force_sig(SIGTRAP, current);
+			ret = -ERESTARTSYS;
+		}
+	}
+	return ret;
+}
+
+static inline int spu_reacquire_runnable(struct spu_context *ctx, u32 *npc,
+					 u32 *status)
+{
+	int ret;
+
+	if ((ret = spu_run_fini(ctx, npc, status)) != 0)
+		return ret;
+	if (*status & (SPU_STATUS_STOPPED_BY_STOP |
+		       SPU_STATUS_STOPPED_BY_HALT)) {
+		return *status;
+	}
+	if ((ret = spu_run_init(ctx, npc, status)) != 0)
+		return ret;
+	return 0;
+}
+
+static inline int spu_process_events(struct spu_context *ctx)
+{
+	struct spu *spu = ctx->spu;
+	u64 pte_fault = MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED;
+	int ret = 0;
+
+	if (spu->dsisr & pte_fault)
+		ret = spu_irq_class_1_bottom(spu);
+	if (spu->class_0_pending)
+		ret = spu_irq_class_0_bottom(spu);
+	if (!ret && signal_pending(current))
+		ret = -ERESTARTSYS;
+	return ret;
+}
+
+long spufs_run_spu(struct file *file, struct spu_context *ctx,
+		   u32 * npc, u32 * status)
+{
+	int ret;
+
+	if ((ret = spu_run_init(ctx, npc, status)) != 0)
+		return ret;
+
+	do {
+		ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, status));
+		if (unlikely(ret))
+			break;
+		if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
+			ret = spu_reacquire_runnable(ctx, npc, status);
+			if (ret) {
+				return ret;
+			}
+			continue;
+		}
+		ret = spu_process_events(ctx);
+
+	} while (!ret && !(*status & (SPU_STATUS_STOPPED_BY_STOP |
+				      SPU_STATUS_STOPPED_BY_HALT)));
+
+	ctx->ops->runcntl_stop(ctx);
+	ret = spu_run_fini(ctx, npc, status);
+	if (!ret)
+		ret = *status;
 	spu_yield(ctx);
+
 	return ret;
 }
 
diff --git a/arch/powerpc/platforms/cell/spufs/hw_ops.c b/arch/powerpc/platforms/cell/spufs/hw_ops.c
index 2e90cae98a87..68812415ee29 100644
--- a/arch/powerpc/platforms/cell/spufs/hw_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/hw_ops.c
@@ -186,6 +186,21 @@ static char *spu_hw_get_ls(struct spu_context *ctx)
 	return ctx->spu->local_store;
 }
 
+static void spu_hw_runcntl_write(struct spu_context *ctx, u32 val)
+{
+	eieio();
+	out_be32(&ctx->spu->problem->spu_runcntl_RW, val);
+}
+
+static void spu_hw_runcntl_stop(struct spu_context *ctx)
+{
+	spin_lock_irq(&ctx->spu->register_lock);
+	out_be32(&ctx->spu->problem->spu_runcntl_RW, SPU_RUNCNTL_STOP);
+	while (in_be32(&ctx->spu->problem->spu_status_R) & SPU_STATUS_RUNNING)
+		cpu_relax();
+	spin_unlock_irq(&ctx->spu->register_lock);
+}
+
 struct spu_context_ops spu_hw_ops = {
 	.mbox_read = spu_hw_mbox_read,
 	.mbox_stat_read = spu_hw_mbox_stat_read,
@@ -203,4 +218,6 @@ struct spu_context_ops spu_hw_ops = {
 	.npc_write = spu_hw_npc_write,
 	.status_read = spu_hw_status_read,
 	.get_ls = spu_hw_get_ls,
+	.runcntl_write = spu_hw_runcntl_write,
+	.runcntl_stop = spu_hw_runcntl_stop,
 };
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 575027062b0b..e2f10b5b8a6a 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -119,7 +119,8 @@ static void prio_wakeup(struct spu_runqueue *rq)
 	}
 }
 
-static void prio_wait(struct spu_runqueue *rq, u64 flags)
+static void prio_wait(struct spu_runqueue *rq, struct spu_context *ctx,
+		      u64 flags)
 {
 	int prio = current->prio;
 	wait_queue_head_t *wq = &rq->prio.waitq[prio];
@@ -130,9 +131,11 @@ static void prio_wait(struct spu_runqueue *rq, u64 flags)
 	prepare_to_wait_exclusive(wq, &wait, TASK_INTERRUPTIBLE);
 	if (!signal_pending(current)) {
 		up(&rq->sem);
+		up_write(&ctx->state_sema);
 		pr_debug("%s: pid=%d prio=%d\n", __FUNCTION__,
 			 current->pid, current->prio);
 		schedule();
+		down_write(&ctx->state_sema);
 		down(&rq->sem);
 	}
 	finish_wait(wq, &wait);
@@ -173,7 +176,9 @@ static inline void bind_context(struct spu *spu, struct spu_context *ctx)
 	mm_needs_global_tlbie(spu->mm);
 	spu->ibox_callback = spufs_ibox_callback;
 	spu->wbox_callback = spufs_wbox_callback;
+	spu->stop_callback = spufs_stop_callback;
 	mb();
+	spu_unmap_mappings(ctx);
 	spu_restore(&ctx->csa, spu);
 }
 
@@ -181,10 +186,12 @@ static inline void unbind_context(struct spu *spu, struct spu_context *ctx)
 {
 	pr_debug("%s: unbind pid=%d SPU=%d\n", __FUNCTION__,
 		 spu->pid, spu->number);
+	spu_unmap_mappings(ctx);
 	spu_save(&ctx->csa, spu);
 	ctx->state = SPU_STATE_SAVED;
 	spu->ibox_callback = NULL;
 	spu->wbox_callback = NULL;
+	spu->stop_callback = NULL;
 	spu->mm = NULL;
 	spu->pid = 0;
 	spu->prio = MAX_PRIO;
@@ -196,37 +203,35 @@ static inline void unbind_context(struct spu *spu, struct spu_context *ctx)
 static struct spu *preempt_active(struct spu_runqueue *rq)
 {
 	struct list_head *p;
-	struct spu_context *ctx;
-	struct spu *spu;
+	struct spu *worst, *spu;
 
-	/* Future: implement real preemption. For now just
-	 * boot a lower priority ctx that is in "detached"
-	 * state, i.e. on a processor but not currently in
-	 * spu_run().
-	 */
+	worst = list_entry(rq->active_list.next, struct spu, sched_list);
 	list_for_each(p, &rq->active_list) {
 		spu = list_entry(p, struct spu, sched_list);
-		if (current->prio < spu->prio) {
-			ctx = spu->ctx;
-			if (down_write_trylock(&ctx->state_sema)) {
-				if (ctx->state != SPU_STATE_RUNNABLE) {
-					up_write(&ctx->state_sema);
-					continue;
-				}
-				pr_debug("%s: booting pid=%d from SPU %d\n",
-					 __FUNCTION__, spu->pid, spu->number);
-				del_active(rq, spu);
-				up(&rq->sem);
-				unbind_context(spu, ctx);
-				up_write(&ctx->state_sema);
-				return spu;
-			}
+		if (spu->prio > worst->prio) {
+			worst = spu;
+		}
+	}
+	if (current->prio < worst->prio) {
+		struct spu_context *ctx = worst->ctx;
+
+		spu = worst;
+		if (down_write_trylock(&ctx->state_sema)) {
+			pr_debug("%s: booting pid=%d from SPU %d\n",
+				 __FUNCTION__, spu->pid, spu->number);
+			del_active(rq, spu);
+			up(&rq->sem);
+			wake_up_all(&ctx->stop_wq);
+			ctx->ops->runcntl_stop(ctx);
+			unbind_context(spu, ctx);
+			up_write(&ctx->state_sema);
+			return spu;
 		}
 	}
 	return NULL;
 }
 
-static struct spu *get_idle_spu(u64 flags)
+static struct spu *get_idle_spu(struct spu_context *ctx, u64 flags)
 {
 	struct spu_runqueue *rq;
 	struct spu *spu = NULL;
@@ -255,7 +260,7 @@ static struct spu *get_idle_spu(u64 flags)
 		if ((spu = preempt_active(rq)) != NULL)
 			return spu;
 	}
-	prio_wait(rq, flags);
+	prio_wait(rq, ctx, flags);
 	if (signal_pending(current)) {
 		prio_wakeup(rq);
 		spu = NULL;
@@ -322,7 +327,7 @@ int spu_activate(struct spu_context *ctx, u64 flags)
 
 	if (ctx->spu)
 		return 0;
-	spu = get_idle_spu(flags);
+	spu = get_idle_spu(ctx, flags);
 	if (!spu)
 		return (signal_pending(current)) ? -ERESTARTSYS : -EAGAIN;
 	bind_context(spu, ctx);
@@ -347,17 +352,19 @@ void spu_deactivate(struct spu_context *ctx)
 void spu_yield(struct spu_context *ctx)
 {
 	struct spu *spu;
+	int need_yield = 0;
 
-	if (!down_write_trylock(&ctx->state_sema))
-		return;
+	down_write(&ctx->state_sema);
 	spu = ctx->spu;
-	if ((ctx->state == SPU_STATE_RUNNABLE) &&
-	    (sched_find_first_bit(spu->rq->prio.bitmap) <= current->prio)) {
+	if (spu && (sched_find_first_bit(spu->rq->prio.bitmap) < MAX_PRIO)) {
 		pr_debug("%s: yielding SPU %d\n", __FUNCTION__, spu->number);
 		spu_deactivate(ctx);
 		ctx->state = SPU_STATE_SAVED;
+		need_yield = 1;
 	}
 	up_write(&ctx->state_sema);
+	if (unlikely(need_yield))
+		yield();
 }
 
 int __init spu_sched_init(void)
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 93c6a0537562..20f4e51d1069 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -51,6 +51,7 @@ struct spu_context {
 	struct kref kref;
 	wait_queue_head_t ibox_wq;
 	wait_queue_head_t wbox_wq;
+	wait_queue_head_t stop_wq;
 	struct fasync_struct *ibox_fasync;
 	struct fasync_struct *wbox_fasync;
 	struct spu_context_ops *ops;
@@ -74,6 +75,8 @@ struct spu_context_ops {
 	void (*npc_write) (struct spu_context * ctx, u32 data);
 	u32(*status_read) (struct spu_context * ctx);
 	char*(*get_ls) (struct spu_context * ctx);
+	void (*runcntl_write) (struct spu_context * ctx, u32 data);
+	void (*runcntl_stop) (struct spu_context * ctx);
 };
 
 extern struct spu_context_ops spu_hw_ops;
@@ -99,6 +102,7 @@ struct spu_context * alloc_spu_context(struct address_space *local_store);
 void destroy_spu_context(struct kref *kref);
 struct spu_context * get_spu_context(struct spu_context *ctx);
 int put_spu_context(struct spu_context *ctx);
+void spu_unmap_mappings(struct spu_context *ctx);
 
 void spu_forget(struct spu_context *ctx);
 void spu_acquire(struct spu_context *ctx);
@@ -118,5 +122,6 @@ size_t spu_ibox_read(struct spu_context *ctx, u32 *data);
 /* irq callback funcs. */
 void spufs_ibox_callback(struct spu *spu);
 void spufs_wbox_callback(struct spu *spu);
+void spufs_stop_callback(struct spu *spu);
 
 #endif