author		Arnd Bergmann <arnd@arndb.de>	2005-11-15 15:53:52 -0500
committer	Paul Mackerras <paulus@samba.org>	2006-01-08 22:49:30 -0500
commit		8b3d6663c6217e4f50cc3720935a96da9b984117 (patch)
tree		5295c29787ac66c26ddf715868fda7fcd3ad5f97
parent		05b841174c289ca62a6b42d883b8791d9ac3a4bd (diff)
[PATCH] spufs: cooperative scheduler support
This adds a scheduler for SPUs to make it possible to use more logical
SPUs than physical ones are present in the system.

Currently, there is no support for preempting a running SPU thread; it
has to leave the SPU either by triggering an event on the SPU that
causes it to return to the owning thread, or by receiving a signal.

This patch also adds operations that enable accessing an SPU in either
runnable or saved state. We use an RW semaphore to protect the state of
the SPU from changing underneath us while we are holding it readable.
In order to change the state, it is acquired writeable and a context
save or restore is executed before downgrading the semaphore to
read-only.

From: Mark Nutter <mnutter@us.ibm.com>,
      Uli Weigand <Ulrich.Weigand@de.ibm.com>

Signed-off-by: Arnd Bergmann <arndb@de.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
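To illustrate the locking scheme described above, a spufs file operation
under the new model follows roughly this pattern (a minimal sketch
condensed from the spufs_mbox_read() and spufs_decr_get() changes below,
not itself part of the patch):

	u32 data;
	int ret;

	/* Readers take ctx->state_sema shared: the context cannot be
	 * saved or restored underneath them, but may be in either state;
	 * ctx->ops dispatches to hw_ops or backing_ops accordingly.
	 */
	spu_acquire(ctx);
	ret = ctx->ops->mbox_read(ctx, &data);
	spu_release(ctx);

	/* Callers that need a specific state take the semaphore
	 * exclusively, let a context save run if necessary, then
	 * downgrade to read-only and access the saved area directly.
	 */
	spu_acquire_saved(ctx);
	data = ctx->csa.lscsa->decr.slot[0];
	spu_release(ctx);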
-rw-r--r--	arch/powerpc/platforms/cell/setup.c		75
-rw-r--r--	arch/powerpc/platforms/cell/spu_base.c		138
-rw-r--r--	arch/powerpc/platforms/cell/spufs/Makefile	2
-rw-r--r--	arch/powerpc/platforms/cell/spufs/backing_ops.c	252
-rw-r--r--	arch/powerpc/platforms/cell/spufs/context.c	114
-rw-r--r--	arch/powerpc/platforms/cell/spufs/file.c	599
-rw-r--r--	arch/powerpc/platforms/cell/spufs/hw_ops.c	206
-rw-r--r--	arch/powerpc/platforms/cell/spufs/inode.c	62
-rw-r--r--	arch/powerpc/platforms/cell/spufs/sched.c	419
-rw-r--r--	arch/powerpc/platforms/cell/spufs/spufs.h	55
-rw-r--r--	arch/powerpc/platforms/cell/spufs/switch.c	51
-rw-r--r--	arch/powerpc/platforms/cell/spufs/syscalls.c	10
-rw-r--r--	include/asm-powerpc/spu.h			19
-rw-r--r--	include/asm-powerpc/spu_csa.h			1
14 files changed, 1680 insertions(+), 323 deletions(-)
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index d45dc18855a..25e0f68d053 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -68,6 +68,77 @@ void cell_show_cpuinfo(struct seq_file *m)
 	of_node_put(root);
 }
 
+#ifdef CONFIG_SPARSEMEM
+static int __init find_spu_node_id(struct device_node *spe)
+{
+	unsigned int *id;
+#ifdef CONFIG_NUMA
+	struct device_node *cpu;
+	cpu = spe->parent->parent;
+	id = (unsigned int *)get_property(cpu, "node-id", NULL);
+#else
+	id = NULL;
+#endif
+	return id ? *id : 0;
+}
+
+static void __init cell_spuprop_present(struct device_node *spe,
+					const char *prop, int early)
+{
+	struct address_prop {
+		unsigned long address;
+		unsigned int len;
+	} __attribute__((packed)) *p;
+	int proplen;
+
+	unsigned long start_pfn, end_pfn, pfn;
+	int node_id;
+
+	p = (void*)get_property(spe, prop, &proplen);
+	WARN_ON(proplen != sizeof (*p));
+
+	node_id = find_spu_node_id(spe);
+
+	start_pfn = p->address >> PAGE_SHIFT;
+	end_pfn = (p->address + p->len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+	/* We need to call memory_present *before* the call to sparse_init,
+	   but we can initialize the page structs only *after* that call.
+	   Thus, we're being called twice. */
+	if (early)
+		memory_present(node_id, start_pfn, end_pfn);
+	else {
+		/* As the pages backing SPU LS and I/O are outside the range
+		   of regular memory, their page structs were not initialized
+		   by free_area_init. Do it here instead. */
+		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
+			struct page *page = pfn_to_page(pfn);
+			set_page_links(page, ZONE_DMA, node_id, pfn);
+			set_page_count(page, 0);
+			reset_page_mapcount(page);
+			SetPageReserved(page);
+			INIT_LIST_HEAD(&page->lru);
+		}
+	}
+}
+
+static void __init cell_spumem_init(int early)
+{
+	struct device_node *node;
+	for (node = of_find_node_by_type(NULL, "spe");
+			node; node = of_find_node_by_type(node, "spe")) {
+		cell_spuprop_present(node, "local-store", early);
+		cell_spuprop_present(node, "problem", early);
+		cell_spuprop_present(node, "priv1", early);
+		cell_spuprop_present(node, "priv2", early);
+	}
+}
+#else
+static void __init cell_spumem_init(int early)
+{
+}
+#endif
+
 static void cell_progress(char *s, unsigned short hex)
 {
 	printk("*** %04x : %s\n", hex, s ? s : "");
@@ -99,6 +170,8 @@ static void __init cell_setup_arch(void)
 #endif
 
 	mmio_nvram_init();
+
+	cell_spumem_init(0);
 }
 
 /*
@@ -114,6 +187,8 @@ static void __init cell_init_early(void)
 
 	ppc64_interrupt_controller = IC_CELL_PIC;
 
+	cell_spumem_init(1);
+
 	DBG(" <- cell_init_early()\n");
 }
 
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 44492d87cdf..408c455cff0 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -69,51 +69,49 @@ static void spu_restart_dma(struct spu *spu)
 
 static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
 {
-	struct spu_priv2 __iomem *priv2;
-	struct mm_struct *mm;
+	struct spu_priv2 __iomem *priv2 = spu->priv2;
+	struct mm_struct *mm = spu->mm;
+	u64 esid, vsid;
 
 	pr_debug("%s\n", __FUNCTION__);
 
 	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE_nr, &spu->flags)) {
+		/* SLBs are pre-loaded for context switch, so
+		 * we should never get here!
+		 */
 		printk("%s: invalid access during switch!\n", __func__);
 		return 1;
 	}
-
-	if (REGION_ID(ea) != USER_REGION_ID) {
+	if (!mm || (REGION_ID(ea) != USER_REGION_ID)) {
+		/* Future: support kernel segments so that drivers
+		 * can use SPUs.
+		 */
 		pr_debug("invalid region access at %016lx\n", ea);
 		return 1;
 	}
 
-	priv2 = spu->priv2;
-	mm = spu->mm;
+	esid = (ea & ESID_MASK) | SLB_ESID_V;
+	vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) | SLB_VSID_USER;
+	if (in_hugepage_area(mm->context, ea))
+		vsid |= SLB_VSID_L;
 
+	out_be64(&priv2->slb_index_W, spu->slb_replace);
+	out_be64(&priv2->slb_vsid_RW, vsid);
+	out_be64(&priv2->slb_esid_RW, esid);
+
+	spu->slb_replace++;
 	if (spu->slb_replace >= 8)
 		spu->slb_replace = 0;
 
-	out_be64(&priv2->slb_index_W, spu->slb_replace);
-	out_be64(&priv2->slb_vsid_RW,
-		 (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT)
-		 | SLB_VSID_USER);
-	out_be64(&priv2->slb_esid_RW, (ea & ESID_MASK) | SLB_ESID_V);
-
 	spu_restart_dma(spu);
 
-	pr_debug("set slb %d context %lx, ea %016lx, vsid %016lx, esid %016lx\n",
-		spu->slb_replace, mm->context.id, ea,
-		(get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT)| SLB_VSID_USER,
-		(ea & ESID_MASK) | SLB_ESID_V);
 	return 0;
 }
 
 extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
-static int __spu_trap_data_map(struct spu *spu, unsigned long ea)
+static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
 {
-	unsigned long dsisr;
-	struct spu_priv1 __iomem *priv1;
-
 	pr_debug("%s\n", __FUNCTION__);
-	priv1 = spu->priv1;
-	dsisr = in_be64(&priv1->mfc_dsisr_RW);
 
 	/* Handle kernel space hash faults immediately.
 	   User hash faults need to be deferred to process context. */
@@ -129,14 +127,17 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea)
 		return 1;
 	}
 
+	spu->dar = ea;
+	spu->dsisr = dsisr;
+	mb();
 	wake_up(&spu->stop_wq);
 	return 0;
 }
 
 static int __spu_trap_mailbox(struct spu *spu)
 {
-	wake_up_all(&spu->ibox_wq);
-	kill_fasync(&spu->ibox_fasync, SIGIO, POLLIN);
+	if (spu->ibox_callback)
+		spu->ibox_callback(spu);
 
 	/* atomically disable SPU mailbox interrupts */
 	spin_lock(&spu->register_lock);
@@ -171,8 +172,8 @@ static int __spu_trap_tag_group(struct spu *spu)
 
 static int __spu_trap_spubox(struct spu *spu)
 {
-	wake_up_all(&spu->wbox_wq);
-	kill_fasync(&spu->wbox_fasync, SIGIO, POLLOUT);
+	if (spu->wbox_callback)
+		spu->wbox_callback(spu);
 
 	/* atomically disable SPU mailbox interrupts */
 	spin_lock(&spu->register_lock);
@@ -220,17 +221,25 @@ static irqreturn_t
 spu_irq_class_1(int irq, void *data, struct pt_regs *regs)
 {
 	struct spu *spu;
-	unsigned long stat, dar;
+	unsigned long stat, mask, dar, dsisr;
 
 	spu = data;
-	stat = in_be64(&spu->priv1->int_stat_class1_RW);
+
+	/* atomically read & clear class1 status. */
+	spin_lock(&spu->register_lock);
+	mask = in_be64(&spu->priv1->int_mask_class1_RW);
+	stat = in_be64(&spu->priv1->int_stat_class1_RW) & mask;
 	dar = in_be64(&spu->priv1->mfc_dar_RW);
+	dsisr = in_be64(&spu->priv1->mfc_dsisr_RW);
+	out_be64(&spu->priv1->mfc_dsisr_RW, 0UL);
+	out_be64(&spu->priv1->int_stat_class1_RW, stat);
+	spin_unlock(&spu->register_lock);
 
 	if (stat & 1) /* segment fault */
 		__spu_trap_data_seg(spu, dar);
 
 	if (stat & 2) { /* mapping fault */
-		__spu_trap_data_map(spu, dar);
+		__spu_trap_data_map(spu, dar, dsisr);
 	}
 
 	if (stat & 4) /* ls compare & suspend on get */
@@ -239,7 +248,6 @@ spu_irq_class_1(int irq, void *data, struct pt_regs *regs)
 	if (stat & 8) /* ls compare & suspend on put */
 		;
 
-	out_be64(&spu->priv1->int_stat_class1_RW, stat);
 	return stat ? IRQ_HANDLED : IRQ_NONE;
 }
 
@@ -396,8 +404,6 @@ EXPORT_SYMBOL(spu_alloc);
 void spu_free(struct spu *spu)
 {
 	down(&spu_mutex);
-	spu->ibox_fasync = NULL;
-	spu->wbox_fasync = NULL;
 	list_add_tail(&spu->list, &spu_list);
 	up(&spu_mutex);
 }
@@ -405,15 +411,13 @@ EXPORT_SYMBOL(spu_free);
 
 static int spu_handle_mm_fault(struct spu *spu)
 {
-	struct spu_priv1 __iomem *priv1;
 	struct mm_struct *mm = spu->mm;
 	struct vm_area_struct *vma;
 	u64 ea, dsisr, is_write;
 	int ret;
 
-	priv1 = spu->priv1;
-	ea = in_be64(&priv1->mfc_dar_RW);
-	dsisr = in_be64(&priv1->mfc_dsisr_RW);
+	ea = spu->dar;
+	dsisr = spu->dsisr;
 #if 0
 	if (!IS_VALID_EA(ea)) {
 		return -EFAULT;
@@ -476,15 +480,14 @@ bad_area:
 
 static int spu_handle_pte_fault(struct spu *spu)
 {
-	struct spu_priv1 __iomem *priv1;
 	u64 ea, dsisr, access, error = 0UL;
 	int ret = 0;
 
-	priv1 = spu->priv1;
-	ea = in_be64(&priv1->mfc_dar_RW);
-	dsisr = in_be64(&priv1->mfc_dsisr_RW);
-	access = (_PAGE_PRESENT | _PAGE_USER);
+	ea = spu->dar;
+	dsisr = spu->dsisr;
 	if (dsisr & MFC_DSISR_PTE_NOT_FOUND) {
+		access = (_PAGE_PRESENT | _PAGE_USER);
+		access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
 		if (hash_page(ea, access, 0x300) != 0)
 			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
 	}
@@ -495,18 +498,33 @@ static int spu_handle_pte_fault(struct spu *spu)
 	else
 		error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR;
 	}
-	if (!error)
+	spu->dar = 0UL;
+	spu->dsisr = 0UL;
+	if (!error) {
 		spu_restart_dma(spu);
-
+	} else {
+		__spu_trap_invalid_dma(spu);
+	}
 	return ret;
 }
 
+static inline int spu_pending(struct spu *spu, u32 * stat)
+{
+	struct spu_problem __iomem *prob = spu->problem;
+	u64 pte_fault;
+
+	*stat = in_be32(&prob->spu_status_R);
+	pte_fault = spu->dsisr &
+	    (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED);
+	return (!(*stat & 0x1) || pte_fault || spu->class_0_pending) ? 1 : 0;
+}
+
 int spu_run(struct spu *spu)
 {
 	struct spu_problem __iomem *prob;
 	struct spu_priv1 __iomem *priv1;
 	struct spu_priv2 __iomem *priv2;
-	unsigned long status;
+	u32 status;
 	int ret;
 
 	prob = spu->problem;
@@ -514,21 +532,15 @@ int spu_run(struct spu *spu)
 	priv2 = spu->priv2;
 
 	/* Let SPU run. */
-	spu->mm = current->mm;
 	eieio();
 	out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
 
 	do {
 		ret = wait_event_interruptible(spu->stop_wq,
-				(!((status = in_be32(&prob->spu_status_R)) & 0x1))
-				|| (in_be64(&priv1->mfc_dsisr_RW) & MFC_DSISR_PTE_NOT_FOUND)
-				|| spu->class_0_pending);
-
-		if (status & SPU_STATUS_STOPPED_BY_STOP)
-			ret = -EAGAIN;
-		else if (status & SPU_STATUS_STOPPED_BY_HALT)
-			ret = -EIO;
-		else if (in_be64(&priv1->mfc_dsisr_RW) & MFC_DSISR_PTE_NOT_FOUND)
+					       spu_pending(spu, &status));
+
+		if (spu->dsisr &
+		    (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
 			ret = spu_handle_pte_fault(spu);
 
 		if (spu->class_0_pending)
@@ -537,7 +549,9 @@ int spu_run(struct spu *spu)
 		if (!ret && signal_pending(current))
 			ret = -ERESTARTSYS;
 
-	} while (!ret);
+	} while (!ret && !(status &
+			   (SPU_STATUS_STOPPED_BY_STOP |
+			    SPU_STATUS_STOPPED_BY_HALT)));
 
 	/* Ensure SPU is stopped. */
 	out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
@@ -549,8 +563,6 @@ int spu_run(struct spu *spu)
 	out_be64(&priv1->tlb_invalidate_entry_W, 0UL);
 	eieio();
 
-	spu->mm = NULL;
-
 	/* Check for SPU breakpoint. */
 	if (unlikely(current->ptrace & PT_PTRACED)) {
 		status = in_be32(&prob->spu_status_R);
@@ -669,19 +681,21 @@ static int __init create_spu(struct device_node *spe)
 	spu->stop_code = 0;
 	spu->slb_replace = 0;
 	spu->mm = NULL;
+	spu->ctx = NULL;
+	spu->rq = NULL;
+	spu->pid = 0;
 	spu->class_0_pending = 0;
 	spu->flags = 0UL;
+	spu->dar = 0UL;
+	spu->dsisr = 0UL;
 	spin_lock_init(&spu->register_lock);
 
 	out_be64(&spu->priv1->mfc_sdr_RW, mfspr(SPRN_SDR1));
 	out_be64(&spu->priv1->mfc_sr1_RW, 0x33);
 
 	init_waitqueue_head(&spu->stop_wq);
-	init_waitqueue_head(&spu->wbox_wq);
-	init_waitqueue_head(&spu->ibox_wq);
-
-	spu->ibox_fasync = NULL;
-	spu->wbox_fasync = NULL;
+	spu->ibox_callback = NULL;
+	spu->wbox_callback = NULL;
 
 	down(&spu_mutex);
 	spu->number = number++;
diff --git a/arch/powerpc/platforms/cell/spufs/Makefile b/arch/powerpc/platforms/cell/spufs/Makefile
index b38ab747efd..ac86b2596d0 100644
--- a/arch/powerpc/platforms/cell/spufs/Makefile
+++ b/arch/powerpc/platforms/cell/spufs/Makefile
@@ -1,6 +1,6 @@
 obj-$(CONFIG_SPU_FS) += spufs.o
-
 spufs-y += inode.o file.o context.o switch.o syscalls.o
+spufs-y += sched.o backing_ops.o hw_ops.o
 
 # Rules to build switch.o with the help of SPU tool chain
 SPU_CROSS	:= spu-
diff --git a/arch/powerpc/platforms/cell/spufs/backing_ops.c b/arch/powerpc/platforms/cell/spufs/backing_ops.c
new file mode 100644
index 00000000000..caf0984064e
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/backing_ops.c
@@ -0,0 +1,252 @@
+/* backing_ops.c - query/set operations on saved SPU context.
+ *
+ * Copyright (C) IBM 2005
+ * Author: Mark Nutter <mnutter@us.ibm.com>
+ *
+ * These register operations allow SPUFS to operate on saved
+ * SPU contexts rather than hardware.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+
+#include <asm/io.h>
+#include <asm/spu.h>
+#include <asm/spu_csa.h>
+#include <asm/mmu_context.h>
+#include "spufs.h"
+
+/*
+ * Reads/writes to various problem and priv2 registers require
+ * state changes, i.e. generate SPU events, modify channel
+ * counts, etc.
+ */
+
+static void gen_spu_event(struct spu_context *ctx, u32 event)
+{
+	u64 ch0_cnt;
+	u64 ch0_data;
+	u64 ch1_data;
+
+	ch0_cnt = ctx->csa.spu_chnlcnt_RW[0];
+	ch0_data = ctx->csa.spu_chnldata_RW[0];
+	ch1_data = ctx->csa.spu_chnldata_RW[1];
+	ctx->csa.spu_chnldata_RW[0] |= event;
+	if ((ch0_cnt == 0) && !(ch0_data & event) && (ch1_data & event)) {
+		ctx->csa.spu_chnlcnt_RW[0] = 1;
+	}
+}
+
+static int spu_backing_mbox_read(struct spu_context *ctx, u32 * data)
+{
+	u32 mbox_stat;
+	int ret = 0;
+
+	spin_lock(&ctx->csa.register_lock);
+	mbox_stat = ctx->csa.prob.mb_stat_R;
+	if (mbox_stat & 0x0000ff) {
+		/* Read the first available word.
+		 * Implementation note: the depth
+		 * of pu_mb_R is currently 1.
+		 */
+		*data = ctx->csa.prob.pu_mb_R;
+		ctx->csa.prob.mb_stat_R &= ~(0x0000ff);
+		ctx->csa.spu_chnlcnt_RW[28] = 1;
+		gen_spu_event(ctx, MFC_PU_MAILBOX_AVAILABLE_EVENT);
+		ret = 4;
+	}
+	spin_unlock(&ctx->csa.register_lock);
+	return ret;
+}
+
+static u32 spu_backing_mbox_stat_read(struct spu_context *ctx)
+{
+	return ctx->csa.prob.mb_stat_R;
+}
+
+static int spu_backing_ibox_read(struct spu_context *ctx, u32 * data)
+{
+	int ret;
+
+	spin_lock(&ctx->csa.register_lock);
+	if (ctx->csa.prob.mb_stat_R & 0xff0000) {
+		/* Read the first available word.
+		 * Implementation note: the depth
+		 * of puint_mb_R is currently 1.
+		 */
+		*data = ctx->csa.priv2.puint_mb_R;
+		ctx->csa.prob.mb_stat_R &= ~(0xff0000);
+		ctx->csa.spu_chnlcnt_RW[30] = 1;
+		gen_spu_event(ctx, MFC_PU_INT_MAILBOX_AVAILABLE_EVENT);
+		ret = 4;
+	} else {
+		/* make sure we get woken up by the interrupt */
+		ctx->csa.priv1.int_mask_class2_RW |= 0x1UL;
+		ret = 0;
+	}
+	spin_unlock(&ctx->csa.register_lock);
+	return ret;
+}
+
+static int spu_backing_wbox_write(struct spu_context *ctx, u32 data)
+{
+	int ret;
+
+	spin_lock(&ctx->csa.register_lock);
+	if ((ctx->csa.prob.mb_stat_R) & 0x00ff00) {
+		int slot = ctx->csa.spu_chnlcnt_RW[29];
+		int avail = (ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8;
+
+		/* We have space to write wbox_data.
+		 * Implementation note: the depth
+		 * of spu_mb_W is currently 4.
+		 */
+		BUG_ON(avail != (4 - slot));
+		ctx->csa.spu_mailbox_data[slot] = data;
+		ctx->csa.spu_chnlcnt_RW[29] = ++slot;
+		ctx->csa.prob.mb_stat_R = (((4 - slot) & 0xff) << 8);
+		gen_spu_event(ctx, MFC_SPU_MAILBOX_WRITTEN_EVENT);
+		ret = 4;
+	} else {
+		/* make sure we get woken up by the interrupt when space
+		   becomes available */
+		ctx->csa.priv1.int_mask_class2_RW |= 0x10;
+		ret = 0;
+	}
+	spin_unlock(&ctx->csa.register_lock);
+	return ret;
+}
+
+static u32 spu_backing_signal1_read(struct spu_context *ctx)
+{
+	return ctx->csa.spu_chnldata_RW[3];
+}
+
+static void spu_backing_signal1_write(struct spu_context *ctx, u32 data)
+{
+	spin_lock(&ctx->csa.register_lock);
+	if (ctx->csa.priv2.spu_cfg_RW & 0x1)
+		ctx->csa.spu_chnldata_RW[3] |= data;
+	else
+		ctx->csa.spu_chnldata_RW[3] = data;
+	ctx->csa.spu_chnlcnt_RW[3] = 1;
+	gen_spu_event(ctx, MFC_SIGNAL_1_EVENT);
+	spin_unlock(&ctx->csa.register_lock);
+}
+
+static u32 spu_backing_signal2_read(struct spu_context *ctx)
+{
+	return ctx->csa.spu_chnldata_RW[4];
+}
+
+static void spu_backing_signal2_write(struct spu_context *ctx, u32 data)
+{
+	spin_lock(&ctx->csa.register_lock);
+	if (ctx->csa.priv2.spu_cfg_RW & 0x2)
+		ctx->csa.spu_chnldata_RW[4] |= data;
+	else
+		ctx->csa.spu_chnldata_RW[4] = data;
+	ctx->csa.spu_chnlcnt_RW[4] = 1;
+	gen_spu_event(ctx, MFC_SIGNAL_2_EVENT);
+	spin_unlock(&ctx->csa.register_lock);
+}
+
+static void spu_backing_signal1_type_set(struct spu_context *ctx, u64 val)
+{
+	u64 tmp;
+
+	spin_lock(&ctx->csa.register_lock);
+	tmp = ctx->csa.priv2.spu_cfg_RW;
+	if (val)
+		tmp |= 1;
+	else
+		tmp &= ~1;
+	ctx->csa.priv2.spu_cfg_RW = tmp;
+	spin_unlock(&ctx->csa.register_lock);
+}
+
+static u64 spu_backing_signal1_type_get(struct spu_context *ctx)
+{
+	return ((ctx->csa.priv2.spu_cfg_RW & 1) != 0);
+}
+
+static void spu_backing_signal2_type_set(struct spu_context *ctx, u64 val)
+{
+	u64 tmp;
+
+	spin_lock(&ctx->csa.register_lock);
+	tmp = ctx->csa.priv2.spu_cfg_RW;
+	if (val)
+		tmp |= 2;
+	else
+		tmp &= ~2;
+	ctx->csa.priv2.spu_cfg_RW = tmp;
+	spin_unlock(&ctx->csa.register_lock);
+}
+
+static u64 spu_backing_signal2_type_get(struct spu_context *ctx)
+{
+	return ((ctx->csa.priv2.spu_cfg_RW & 2) != 0);
+}
+
+static u32 spu_backing_npc_read(struct spu_context *ctx)
+{
+	return ctx->csa.prob.spu_npc_RW;
+}
+
+static void spu_backing_npc_write(struct spu_context *ctx, u32 val)
+{
+	ctx->csa.prob.spu_npc_RW = val;
+}
+
+static u32 spu_backing_status_read(struct spu_context *ctx)
+{
+	return ctx->csa.prob.spu_status_R;
+}
+
+static char *spu_backing_get_ls(struct spu_context *ctx)
+{
+	return ctx->csa.lscsa->ls;
+}
+
+struct spu_context_ops spu_backing_ops = {
+	.mbox_read = spu_backing_mbox_read,
+	.mbox_stat_read = spu_backing_mbox_stat_read,
+	.ibox_read = spu_backing_ibox_read,
+	.wbox_write = spu_backing_wbox_write,
+	.signal1_read = spu_backing_signal1_read,
+	.signal1_write = spu_backing_signal1_write,
+	.signal2_read = spu_backing_signal2_read,
+	.signal2_write = spu_backing_signal2_write,
+	.signal1_type_set = spu_backing_signal1_type_set,
+	.signal1_type_get = spu_backing_signal1_type_get,
+	.signal2_type_set = spu_backing_signal2_type_set,
+	.signal2_type_get = spu_backing_signal2_type_get,
+	.npc_read = spu_backing_npc_read,
+	.npc_write = spu_backing_npc_write,
+	.status_read = spu_backing_status_read,
+	.get_ls = spu_backing_get_ls,
+};
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 41eea4576b6..5d6195fc107 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -20,39 +20,38 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include <linux/fs.h>
+#include <linux/mm.h>
 #include <linux/slab.h>
 #include <asm/spu.h>
 #include <asm/spu_csa.h>
 #include "spufs.h"
 
-struct spu_context *alloc_spu_context(void)
+struct spu_context *alloc_spu_context(struct address_space *local_store)
 {
 	struct spu_context *ctx;
 	ctx = kmalloc(sizeof *ctx, GFP_KERNEL);
 	if (!ctx)
 		goto out;
-	/* Future enhancement: do not call spu_alloc()
-	 * here. This step should be deferred until
-	 * spu_run()!!
-	 *
-	 * More work needs to be done to read(),
-	 * write(), mmap(), etc., so that operations
-	 * are performed on CSA when the context is
-	 * not currently being run. In this way we
-	 * can support arbitrarily large number of
-	 * entries in /spu, allow state queries, etc.
+	/* Binding to physical processor deferred
+	 * until spu_activate().
 	 */
-	ctx->spu = spu_alloc();
-	if (!ctx->spu)
-		goto out_free;
 	spu_init_csa(&ctx->csa);
 	if (!ctx->csa.lscsa) {
-		spu_free(ctx->spu);
 		goto out_free;
 	}
-	init_rwsem(&ctx->backing_sema);
 	spin_lock_init(&ctx->mmio_lock);
 	kref_init(&ctx->kref);
+	init_rwsem(&ctx->state_sema);
+	init_waitqueue_head(&ctx->ibox_wq);
+	init_waitqueue_head(&ctx->wbox_wq);
+	ctx->ibox_fasync = NULL;
+	ctx->wbox_fasync = NULL;
+	ctx->state = SPU_STATE_SAVED;
+	ctx->local_store = local_store;
+	ctx->spu = NULL;
+	ctx->ops = &spu_backing_ops;
+	ctx->owner = get_task_mm(current);
 	goto out;
 out_free:
 	kfree(ctx);
@@ -65,8 +64,11 @@ void destroy_spu_context(struct kref *kref)
 {
 	struct spu_context *ctx;
 	ctx = container_of(kref, struct spu_context, kref);
-	if (ctx->spu)
-		spu_free(ctx->spu);
+	down_write(&ctx->state_sema);
+	spu_deactivate(ctx);
+	ctx->ibox_fasync = NULL;
+	ctx->wbox_fasync = NULL;
+	up_write(&ctx->state_sema);
 	spu_fini_csa(&ctx->csa);
 	kfree(ctx);
 }
@@ -82,4 +84,80 @@ int put_spu_context(struct spu_context *ctx)
 	return kref_put(&ctx->kref, &destroy_spu_context);
 }
 
+/* give up the mm reference when the context is about to be destroyed */
+void spu_forget(struct spu_context *ctx)
+{
+	struct mm_struct *mm;
+	spu_acquire_saved(ctx);
+	mm = ctx->owner;
+	ctx->owner = NULL;
+	mmput(mm);
+	spu_release(ctx);
+}
+
+void spu_acquire(struct spu_context *ctx)
+{
+	down_read(&ctx->state_sema);
+}
+
+void spu_release(struct spu_context *ctx)
+{
+	up_read(&ctx->state_sema);
+}
+
+static void spu_unmap_mappings(struct spu_context *ctx)
+{
+	unmap_mapping_range(ctx->local_store, 0, LS_SIZE, 1);
+}
+
+int spu_acquire_runnable(struct spu_context *ctx)
+{
+	int ret = 0;
 
+	down_read(&ctx->state_sema);
+	if (ctx->state == SPU_STATE_RUNNABLE)
+		return 0;
+	/* ctx is about to be freed, can't acquire any more */
+	if (!ctx->owner) {
+		ret = -EINVAL;
+		goto out;
+	}
+	up_read(&ctx->state_sema);
+
+	down_write(&ctx->state_sema);
+	if (ctx->state == SPU_STATE_SAVED) {
+		spu_unmap_mappings(ctx);
+		ret = spu_activate(ctx, 0);
+		ctx->state = SPU_STATE_RUNNABLE;
+	}
+	downgrade_write(&ctx->state_sema);
+	if (ret)
+		goto out;
+
+	/* On success, we return holding the lock */
+	return ret;
+out:
+	/* Release here, to simplify calling code. */
+	up_read(&ctx->state_sema);
+
+	return ret;
+}
+
+void spu_acquire_saved(struct spu_context *ctx)
+{
+	down_read(&ctx->state_sema);
+
+	if (ctx->state == SPU_STATE_SAVED)
+		return;
+
+	up_read(&ctx->state_sema);
+	down_write(&ctx->state_sema);
+
+	if (ctx->state == SPU_STATE_RUNNABLE) {
+		spu_unmap_mappings(ctx);
+		spu_deactivate(ctx);
+		ctx->state = SPU_STATE_SAVED;
+	}
+
+	downgrade_write(&ctx->state_sema);
+}
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index c1e64331049..786fdb1a1cc 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -32,11 +32,13 @@
 
 #include "spufs.h"
 
+
 static int
 spufs_mem_open(struct inode *inode, struct file *file)
 {
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	file->private_data = i->i_ctx;
+	file->f_mapping = i->i_ctx->local_store;
 	return 0;
 }
 
@@ -44,23 +46,16 @@ static ssize_t
 spufs_mem_read(struct file *file, char __user *buffer,
 				size_t size, loff_t *pos)
 {
-	struct spu *spu;
-	struct spu_context *ctx;
+	struct spu_context *ctx = file->private_data;
+	char *local_store;
 	int ret;
 
-	ctx = file->private_data;
-	spu = ctx->spu;
+	spu_acquire(ctx);
 
-	down_read(&ctx->backing_sema);
-	if (spu->number & 0/*1*/) {
-		ret = generic_file_read(file, buffer, size, pos);
-		goto out;
-	}
+	local_store = ctx->ops->get_ls(ctx);
+	ret = simple_read_from_buffer(buffer, size, pos, local_store, LS_SIZE);
 
-	ret = simple_read_from_buffer(buffer, size, pos,
-			spu->local_store, LS_SIZE);
-out:
-	up_read(&ctx->backing_sema);
 	return ret;
 }
 
@@ -69,50 +64,181 @@ spufs_mem_write(struct file *file, const char __user *buffer,
 		size_t size, loff_t *pos)
 {
 	struct spu_context *ctx = file->private_data;
-	struct spu *spu = ctx->spu;
-
-	if (spu->number & 0) //1)
-		return generic_file_write(file, buffer, size, pos);
+	char *local_store;
+	int ret;
 
 	size = min_t(ssize_t, LS_SIZE - *pos, size);
 	if (size <= 0)
 		return -EFBIG;
 	*pos += size;
-	return copy_from_user(spu->local_store + *pos - size,
-			buffer, size) ? -EFAULT : size;
+
+	spu_acquire(ctx);
+
+	local_store = ctx->ops->get_ls(ctx);
+	ret = copy_from_user(local_store + *pos - size,
+			     buffer, size) ? -EFAULT : size;
+
+	spu_release(ctx);
+	return ret;
 }
 
+#ifdef CONFIG_SPARSEMEM
+static struct page *
+spufs_mem_mmap_nopage(struct vm_area_struct *vma,
+		      unsigned long address, int *type)
+{
+	struct page *page = NOPAGE_SIGBUS;
+
+	struct spu_context *ctx = vma->vm_file->private_data;
+	unsigned long offset = address - vma->vm_start;
+	offset += vma->vm_pgoff << PAGE_SHIFT;
+
+	spu_acquire(ctx);
+
+	if (ctx->state == SPU_STATE_SAVED)
+		page = vmalloc_to_page(ctx->csa.lscsa->ls + offset);
+	else
+		page = pfn_to_page((ctx->spu->local_store_phys + offset)
+				   >> PAGE_SHIFT);
+
+	spu_release(ctx);
+
+	if (type)
+		*type = VM_FAULT_MINOR;
+
+	return page;
+}
+
+static struct vm_operations_struct spufs_mem_mmap_vmops = {
+	.nopage = spufs_mem_mmap_nopage,
+};
+
 static int
 spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
 {
-	struct spu_context *ctx = file->private_data;
-	struct spu *spu = ctx->spu;
-	unsigned long pfn;
-
-	if (spu->number & 0) //1)
-		return generic_file_mmap(file, vma);
+	if (!(vma->vm_flags & VM_SHARED))
+		return -EINVAL;
 
+	/* FIXME: */
 	vma->vm_flags |= VM_RESERVED;
-	vma->vm_page_prot = __pgprot(pgprot_val (vma->vm_page_prot)
+	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
 				     | _PAGE_NO_CACHE);
-	pfn = spu->local_store_phys >> PAGE_SHIFT;
-	/*
-	 * This will work for actual SPUs, but not for vmalloc memory:
-	 */
-	if (remap_pfn_range(vma, vma->vm_start, pfn,
-			vma->vm_end-vma->vm_start, vma->vm_page_prot))
-		return -EAGAIN;
+
+	vma->vm_ops = &spufs_mem_mmap_vmops;
 	return 0;
 }
+#endif
 
 static struct file_operations spufs_mem_fops = {
 	.open	 = spufs_mem_open,
 	.read    = spufs_mem_read,
 	.write   = spufs_mem_write,
+	.llseek  = generic_file_llseek,
+#ifdef CONFIG_SPARSEMEM
 	.mmap    = spufs_mem_mmap,
+#endif
+};
+
+static int
+spufs_regs_open(struct inode *inode, struct file *file)
+{
+	struct spufs_inode_info *i = SPUFS_I(inode);
+	file->private_data = i->i_ctx;
+	return 0;
+}
+
+static ssize_t
+spufs_regs_read(struct file *file, char __user *buffer,
+		size_t size, loff_t *pos)
+{
+	struct spu_context *ctx = file->private_data;
+	struct spu_lscsa *lscsa = ctx->csa.lscsa;
+	int ret;
+
+	spu_acquire_saved(ctx);
+
+	ret = simple_read_from_buffer(buffer, size, pos,
+				      lscsa->gprs, sizeof lscsa->gprs);
+
+	spu_release(ctx);
+	return ret;
+}
+
+static ssize_t
+spufs_regs_write(struct file *file, const char __user *buffer,
+		 size_t size, loff_t *pos)
+{
+	struct spu_context *ctx = file->private_data;
+	struct spu_lscsa *lscsa = ctx->csa.lscsa;
+	int ret;
+
+	size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size);
+	if (size <= 0)
+		return -EFBIG;
+	*pos += size;
+
+	spu_acquire_saved(ctx);
+
+	ret = copy_from_user(lscsa->gprs + *pos - size,
+			     buffer, size) ? -EFAULT : size;
+
+	spu_release(ctx);
+	return ret;
+}
+
+static struct file_operations spufs_regs_fops = {
+	.open	 = spufs_regs_open,
+	.read    = spufs_regs_read,
+	.write   = spufs_regs_write,
 	.llseek  = generic_file_llseek,
 };
 
+static ssize_t
+spufs_fpcr_read(struct file *file, char __user * buffer,
+		size_t size, loff_t * pos)
+{
+	struct spu_context *ctx = file->private_data;
+	struct spu_lscsa *lscsa = ctx->csa.lscsa;
+	int ret;
+
+	spu_acquire_saved(ctx);
+
+	ret = simple_read_from_buffer(buffer, size, pos,
+				      &lscsa->fpcr, sizeof(lscsa->fpcr));
+
+	spu_release(ctx);
+	return ret;
+}
+
+static ssize_t
+spufs_fpcr_write(struct file *file, const char __user * buffer,
+		 size_t size, loff_t * pos)
+{
+	struct spu_context *ctx = file->private_data;
+	struct spu_lscsa *lscsa = ctx->csa.lscsa;
+	int ret;
+
+	size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
+	if (size <= 0)
+		return -EFBIG;
+	*pos += size;
+
+	spu_acquire_saved(ctx);
+
+	ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
+			     buffer, size) ? -EFAULT : size;
+
+	spu_release(ctx);
+	return ret;
+}
+
+static struct file_operations spufs_fpcr_fops = {
+	.open	 = spufs_regs_open,
+	.read    = spufs_fpcr_read,
+	.write   = spufs_fpcr_write,
+	.llseek  = generic_file_llseek,
+};
+
 /* generic open function for all pipe-like files */
 static int spufs_pipe_open(struct inode *inode, struct file *file)
 {
@@ -125,21 +251,19 @@ static int spufs_pipe_open(struct inode *inode, struct file *file)
 static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
 			size_t len, loff_t *pos)
 {
-	struct spu_context *ctx;
-	struct spu_problem __iomem *prob;
-	u32 mbox_stat;
+	struct spu_context *ctx = file->private_data;
 	u32 mbox_data;
+	int ret;
 
 	if (len < 4)
 		return -EINVAL;
 
-	ctx = file->private_data;
-	prob = ctx->spu->problem;
-	mbox_stat = in_be32(&prob->mb_stat_R);
-	if (!(mbox_stat & 0x0000ff))
-		return -EAGAIN;
+	spu_acquire(ctx);
+	ret = ctx->ops->mbox_read(ctx, &mbox_data);
+	spu_release(ctx);
 
-	mbox_data = in_be32(&prob->pu_mb_R);
+	if (!ret)
+		return -EAGAIN;
 
 	if (copy_to_user(buf, &mbox_data, sizeof mbox_data))
 		return -EFAULT;
@@ -155,14 +279,17 @@ static struct file_operations spufs_mbox_fops = {
 static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
 			size_t len, loff_t *pos)
 {
-	struct spu_context *ctx;
+	struct spu_context *ctx = file->private_data;
 	u32 mbox_stat;
 
 	if (len < 4)
 		return -EINVAL;
 
-	ctx = file->private_data;
-	mbox_stat = in_be32(&ctx->spu->problem->mb_stat_R) & 0xff;
+	spu_acquire(ctx);
+
+	mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;
+
+	spu_release(ctx);
 
 	if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
 		return -EFAULT;
@@ -175,57 +302,78 @@ static struct file_operations spufs_mbox_stat_fops = {
 	.read	= spufs_mbox_stat_read,
 };
 
+/*
+ * spufs_wait
+ *	Same as wait_event_interruptible(), except that here
+ *	we need to call spu_release(ctx) before sleeping, and
+ *	then spu_acquire(ctx) when awoken.
+ */
+
+#define spufs_wait(wq, condition)					\
+({									\
+	int __ret = 0;							\
+	DEFINE_WAIT(__wait);						\
+	for (;;) {							\
+		prepare_to_wait(&(wq), &__wait, TASK_INTERRUPTIBLE);	\
+		if (condition)						\
+			break;						\
+		if (!signal_pending(current)) {				\
+			spu_release(ctx);				\
+			schedule();					\
+			spu_acquire(ctx);				\
+			continue;					\
+		}							\
+		__ret = -ERESTARTSYS;					\
+		break;							\
+	}								\
+	finish_wait(&(wq), &__wait);					\
+	__ret;								\
+})
+
 /* low-level ibox access function */
-size_t spu_ibox_read(struct spu *spu, u32 *data)
+size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
 {
-	int ret;
-
-	spin_lock_irq(&spu->register_lock);
-
-	if (in_be32(&spu->problem->mb_stat_R) & 0xff0000) {
-		/* read the first available word */
-		*data = in_be64(&spu->priv2->puint_mb_R);
-		ret = 4;
-	} else {
-		/* make sure we get woken up by the interrupt */
-		out_be64(&spu->priv1->int_mask_class2_RW,
-			 in_be64(&spu->priv1->int_mask_class2_RW) | 0x1);
-		ret = 0;
-	}
-
-	spin_unlock_irq(&spu->register_lock);
-	return ret;
+	return ctx->ops->ibox_read(ctx, data);
+}
+
+static int spufs_ibox_fasync(int fd, struct file *file, int on)
+{
+	struct spu_context *ctx = file->private_data;
+
+	return fasync_helper(fd, file, on, &ctx->ibox_fasync);
 }
-EXPORT_SYMBOL(spu_ibox_read);
 
-static int spufs_ibox_fasync(int fd, struct file *file, int on)
+/* interrupt-level ibox callback function. */
+void spufs_ibox_callback(struct spu *spu)
 {
-	struct spu_context *ctx;
-	ctx = file->private_data;
-	return fasync_helper(fd, file, on, &ctx->spu->ibox_fasync);
+	struct spu_context *ctx = spu->ctx;
+
+	wake_up_all(&ctx->ibox_wq);
+	kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
 }
 
 static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
 			size_t len, loff_t *pos)
 {
-	struct spu_context *ctx;
+	struct spu_context *ctx = file->private_data;
 	u32 ibox_data;
 	ssize_t ret;
 
 	if (len < 4)
 		return -EINVAL;
 
-	ctx = file->private_data;
+	spu_acquire(ctx);
 
 	ret = 0;
 	if (file->f_flags & O_NONBLOCK) {
-		if (!spu_ibox_read(ctx->spu, &ibox_data))
+		if (!spu_ibox_read(ctx, &ibox_data))
 			ret = -EAGAIN;
 	} else {
-		ret = wait_event_interruptible(ctx->spu->ibox_wq,
-			spu_ibox_read(ctx->spu, &ibox_data));
+		ret = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
 	}
 
+	spu_release(ctx);
+
 	if (ret)
 		return ret;
 
@@ -238,16 +386,17 @@ static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
 
 static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
 {
-	struct spu_context *ctx;
-	struct spu_problem __iomem *prob;
+	struct spu_context *ctx = file->private_data;
 	u32 mbox_stat;
 	unsigned int mask;
 
-	ctx = file->private_data;
-	prob = ctx->spu->problem;
-	mbox_stat = in_be32(&prob->mb_stat_R);
+	spu_acquire(ctx);
+
+	mbox_stat = ctx->ops->mbox_stat_read(ctx);
+
+	spu_release(ctx);
 
-	poll_wait(file, &ctx->spu->ibox_wq, wait);
+	poll_wait(file, &ctx->ibox_wq, wait);
 
 	mask = 0;
 	if (mbox_stat & 0xff0000)
@@ -266,14 +415,15 @@ static struct file_operations spufs_ibox_fops = {
 static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
 			size_t len, loff_t *pos)
 {
-	struct spu_context *ctx;
+	struct spu_context *ctx = file->private_data;
 	u32 ibox_stat;
 
 	if (len < 4)
 		return -EINVAL;
 
-	ctx = file->private_data;
-	ibox_stat = (in_be32(&ctx->spu->problem->mb_stat_R) >> 16) & 0xff;
+	spu_acquire(ctx);
+	ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
+	spu_release(ctx);
 
 	if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
 		return -EFAULT;
@@ -287,75 +437,69 @@ static struct file_operations spufs_ibox_stat_fops = {
 };
 
 /* low-level mailbox write */
-size_t spu_wbox_write(struct spu *spu, u32 data)
+size_t spu_wbox_write(struct spu_context *ctx, u32 data)
 {
-	int ret;
+	return ctx->ops->wbox_write(ctx, data);
+}
 
-	spin_lock_irq(&spu->register_lock);
+static int spufs_wbox_fasync(int fd, struct file *file, int on)
+{
+	struct spu_context *ctx = file->private_data;
+	int ret;
 
-	if (in_be32(&spu->problem->mb_stat_R) & 0x00ff00) {
-		/* we have space to write wbox_data to */
-		out_be32(&spu->problem->spu_mb_W, data);
-		ret = 4;
-	} else {
-		/* make sure we get woken up by the interrupt when space
-		   becomes available */
-		out_be64(&spu->priv1->int_mask_class2_RW,
-			 in_be64(&spu->priv1->int_mask_class2_RW) | 0x10);
-		ret = 0;
-	}
+	ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);
 
-	spin_unlock_irq(&spu->register_lock);
 	return ret;
 }
-EXPORT_SYMBOL(spu_wbox_write);
 
-static int spufs_wbox_fasync(int fd, struct file *file, int on)
+/* interrupt-level wbox callback function. */
+void spufs_wbox_callback(struct spu *spu)
 {
-	struct spu_context *ctx;
-	ctx = file->private_data;
-	return fasync_helper(fd, file, on, &ctx->spu->wbox_fasync);
+	struct spu_context *ctx = spu->ctx;
+
+	wake_up_all(&ctx->wbox_wq);
+	kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
 }
 
 static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
 			size_t len, loff_t *pos)
 {
-	struct spu_context *ctx;
+	struct spu_context *ctx = file->private_data;
 	u32 wbox_data;
 	int ret;
 
 	if (len < 4)
 		return -EINVAL;
 
-	ctx = file->private_data;
-
 	if (copy_from_user(&wbox_data, buf, sizeof wbox_data))
 		return -EFAULT;
 
+	spu_acquire(ctx);
+
 	ret = 0;
 	if (file->f_flags & O_NONBLOCK) {
-		if (!spu_wbox_write(ctx->spu, wbox_data))
+		if (!spu_wbox_write(ctx, wbox_data))
 			ret = -EAGAIN;
 	} else {
-		ret = wait_event_interruptible(ctx->spu->wbox_wq,
-			spu_wbox_write(ctx->spu, wbox_data));
+		ret = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
 	}
 
+	spu_release(ctx);
+
 	return ret ? ret : sizeof wbox_data;
 }
 
 static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
 {
-	struct spu_context *ctx;
-	struct spu_problem __iomem *prob;
+	struct spu_context *ctx = file->private_data;
 	u32 mbox_stat;
 	unsigned int mask;
 
-	ctx = file->private_data;
-	prob = ctx->spu->problem;
-	mbox_stat = in_be32(&prob->mb_stat_R);
+	spu_acquire(ctx);
+	mbox_stat = ctx->ops->mbox_stat_read(ctx);
+	spu_release(ctx);
 
-	poll_wait(file, &ctx->spu->wbox_wq, wait);
+	poll_wait(file, &ctx->wbox_wq, wait);
 
 	mask = 0;
 	if (mbox_stat & 0x00ff00)
@@ -374,14 +518,15 @@ static struct file_operations spufs_wbox_fops = {
 static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
 			size_t len, loff_t *pos)
 {
-	struct spu_context *ctx;
+	struct spu_context *ctx = file->private_data;
 	u32 wbox_stat;
 
 	if (len < 4)
 		return -EINVAL;
 
-	ctx = file->private_data;
-	wbox_stat = (in_be32(&ctx->spu->problem->mb_stat_R) >> 8) & 0xff;
+	spu_acquire(ctx);
+	wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
+	spu_release(ctx);
 
 	if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
 		return -EFAULT;
@@ -395,47 +540,41 @@ static struct file_operations spufs_wbox_stat_fops = {
 };
 
 long spufs_run_spu(struct file *file, struct spu_context *ctx,
 		u32 *npc, u32 *status)
 {
-	struct spu_problem __iomem *prob;
 	int ret;
 
-	if (file->f_flags & O_NONBLOCK) {
-		ret = -EAGAIN;
-		if (!down_write_trylock(&ctx->backing_sema))
-			goto out;
-	} else {
-		down_write(&ctx->backing_sema);
-	}
+	ret = spu_acquire_runnable(ctx);
+	if (ret)
+		return ret;
 
-	prob = ctx->spu->problem;
-	out_be32(&prob->spu_npc_RW, *npc);
+	ctx->ops->npc_write(ctx, *npc);
 
 	ret = spu_run(ctx->spu);
 
-	*status = in_be32(&prob->spu_status_R);
-	*npc = in_be32(&prob->spu_npc_RW);
+	if (!ret)
+		ret = ctx->ops->status_read(ctx);
 
-	up_write(&ctx->backing_sema);
+	*npc = ctx->ops->npc_read(ctx);
 
-out:
+	spu_release(ctx);
+	spu_yield(ctx);
 	return ret;
 }
 
 static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
 			size_t len, loff_t *pos)
 {
-	struct spu_context *ctx;
-	struct spu_problem *prob;
+	struct spu_context *ctx = file->private_data;
 	u32 data;
 
-	ctx = file->private_data;
-	prob = ctx->spu->problem;
-
 	if (len < 4)
 		return -EINVAL;
 
-	data = in_be32(&prob->signal_notify1);
+	spu_acquire(ctx);
+	data = ctx->ops->signal1_read(ctx);
+	spu_release(ctx);
+
 	if (copy_to_user(buf, &data, 4))
 		return -EFAULT;
 
@@ -446,11 +585,9 @@ static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
 		size_t len, loff_t *pos)
 {
 	struct spu_context *ctx;
-	struct spu_problem *prob;
 	u32 data;
 
 	ctx = file->private_data;
-	prob = ctx->spu->problem;
 
 	if (len < 4)
 		return -EINVAL;
@@ -458,7 +595,9 @@ static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
 	if (copy_from_user(&data, buf, 4))
 		return -EFAULT;
 
-	out_be32(&prob->signal_notify1, data);
+	spu_acquire(ctx);
+	ctx->ops->signal1_write(ctx, data);
+	spu_release(ctx);
 
 	return 4;
 }
@@ -473,16 +612,17 @@ static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
 		size_t len, loff_t *pos)
 {
 	struct spu_context *ctx;
-	struct spu_problem *prob;
 	u32 data;
 
 	ctx = file->private_data;
-	prob = ctx->spu->problem;
 
 	if (len < 4)
 		return -EINVAL;
 
-	data = in_be32(&prob->signal_notify2);
+	spu_acquire(ctx);
+	data = ctx->ops->signal2_read(ctx);
+	spu_release(ctx);
+
 	if (copy_to_user(buf, &data, 4))
 		return -EFAULT;
 
@@ -493,11 +633,9 @@ static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
 		size_t len, loff_t *pos)
 {
 	struct spu_context *ctx;
-	struct spu_problem *prob;
 	u32 data;
 
 	ctx = file->private_data;
-	prob = ctx->spu->problem;
 
 	if (len < 4)
 		return -EINVAL;
@@ -505,7 +643,9 @@ static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
 	if (copy_from_user(&data, buf, 4))
 		return -EFAULT;
 
-	out_be32(&prob->signal_notify2, data);
+	spu_acquire(ctx);
+	ctx->ops->signal2_write(ctx, data);
+	spu_release(ctx);
 
 	return 4;
 }
@@ -519,23 +659,22 @@ static struct file_operations spufs_signal2_fops = {
 static void spufs_signal1_type_set(void *data, u64 val)
 {
 	struct spu_context *ctx = data;
-	struct spu_priv2 *priv2 = ctx->spu->priv2;
-	u64 tmp;
 
-	spin_lock_irq(&ctx->spu->register_lock);
-	tmp = in_be64(&priv2->spu_cfg_RW);
-	if (val)
-		tmp |= 1;
-	else
-		tmp &= ~1;
-	out_be64(&priv2->spu_cfg_RW, tmp);
-	spin_unlock_irq(&ctx->spu->register_lock);
+	spu_acquire(ctx);
+	ctx->ops->signal1_type_set(ctx, val);
+	spu_release(ctx);
 }
 
 static u64 spufs_signal1_type_get(void *data)
 {
 	struct spu_context *ctx = data;
-	return (in_be64(&ctx->spu->priv2->spu_cfg_RW) & 1) != 0;
+	u64 ret;
+
+	spu_acquire(ctx);
+	ret = ctx->ops->signal1_type_get(ctx);
+	spu_release(ctx);
+
+	return ret;
 }
 DEFINE_SIMPLE_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
 		       spufs_signal1_type_set, "%llu");
@@ -543,23 +682,22 @@ DEFINE_SIMPLE_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
 static void spufs_signal2_type_set(void *data, u64 val)
 {
 	struct spu_context *ctx = data;
-	struct spu_priv2 *priv2 = ctx->spu->priv2;
-	u64 tmp;
 
-	spin_lock_irq(&ctx->spu->register_lock);
-	tmp = in_be64(&priv2->spu_cfg_RW);
-	if (val)
-		tmp |= 2;
-	else
-		tmp &= ~2;
-	out_be64(&priv2->spu_cfg_RW, tmp);
-	spin_unlock_irq(&ctx->spu->register_lock);
+	spu_acquire(ctx);
+	ctx->ops->signal2_type_set(ctx, val);
+	spu_release(ctx);
 }
 
 static u64 spufs_signal2_type_get(void *data)
 {
 	struct spu_context *ctx = data;
-	return (in_be64(&ctx->spu->priv2->spu_cfg_RW) & 2) != 0;
+	u64 ret;
+
+	spu_acquire(ctx);
+	ret = ctx->ops->signal2_type_get(ctx);
+	spu_release(ctx);
+
+	return ret;
 }
 DEFINE_SIMPLE_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
 		       spufs_signal2_type_set, "%llu");
@@ -567,20 +705,135 @@ DEFINE_SIMPLE_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
 static void spufs_npc_set(void *data, u64 val)
 {
 	struct spu_context *ctx = data;
-	out_be32(&ctx->spu->problem->spu_npc_RW, val);
+	spu_acquire(ctx);
+	ctx->ops->npc_write(ctx, val);
+	spu_release(ctx);
 }
 
 static u64 spufs_npc_get(void *data)
 {
 	struct spu_context *ctx = data;
 	u64 ret;
-	ret = in_be32(&ctx->spu->problem->spu_npc_RW);
+	spu_acquire(ctx);
+	ret = ctx->ops->npc_read(ctx);
+	spu_release(ctx);
 	return ret;
 }
 DEFINE_SIMPLE_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set, "%llx\n")
 
+static void spufs_decr_set(void *data, u64 val)
+{
+	struct spu_context *ctx = data;
+	struct spu_lscsa *lscsa = ctx->csa.lscsa;
+	spu_acquire_saved(ctx);
+	lscsa->decr.slot[0] = (u32) val;
+	spu_release(ctx);
+}
+
+static u64 spufs_decr_get(void *data)
+{
+	struct spu_context *ctx = data;
+	struct spu_lscsa *lscsa = ctx->csa.lscsa;
+	u64 ret;
+	spu_acquire_saved(ctx);
+	ret = lscsa->decr.slot[0];
+	spu_release(ctx);
+	return ret;
+}
+DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
+			"%llx\n")
+
+static void spufs_decr_status_set(void *data, u64 val)
+{
+	struct spu_context *ctx = data;
+	struct spu_lscsa *lscsa = ctx->csa.lscsa;
+	spu_acquire_saved(ctx);
+	lscsa->decr_status.slot[0] = (u32) val;
+	spu_release(ctx);
+}
+
+static u64 spufs_decr_status_get(void *data)
+{
+	struct spu_context *ctx = data;
+	struct spu_lscsa *lscsa = ctx->csa.lscsa;
+	u64 ret;
+	spu_acquire_saved(ctx);
+	ret = lscsa->decr_status.slot[0];
+	spu_release(ctx);
+	return ret;
+}
+DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
+			spufs_decr_status_set, "%llx\n")
+
+static void spufs_spu_tag_mask_set(void *data, u64 val)
+{
+	struct spu_context *ctx = data;
+	struct spu_lscsa *lscsa = ctx->csa.lscsa;
+	spu_acquire_saved(ctx);
+	lscsa->tag_mask.slot[0] = (u32) val;
+	spu_release(ctx);
+}
+
+static u64 spufs_spu_tag_mask_get(void *data)
+{
+	struct spu_context *ctx = data;
+	struct spu_lscsa *lscsa = ctx->csa.lscsa;
+	u64 ret;
+	spu_acquire_saved(ctx);
+	ret = lscsa->tag_mask.slot[0];
+	spu_release(ctx);
+	return ret;
+}
+DEFINE_SIMPLE_ATTRIBUTE(spufs_spu_tag_mask_ops, spufs_spu_tag_mask_get,
+			spufs_spu_tag_mask_set, "%llx\n")
+
+static void spufs_event_mask_set(void *data, u64 val)
+{
+	struct spu_context *ctx = data;
+	struct spu_lscsa *lscsa = ctx->csa.lscsa;
+	spu_acquire_saved(ctx);
+	lscsa->event_mask.slot[0] = (u32) val;
+	spu_release(ctx);
+}
+
+static u64 spufs_event_mask_get(void *data)
+{
+	struct spu_context *ctx = data;
+	struct spu_lscsa *lscsa = ctx->csa.lscsa;
+	u64 ret;
+	spu_acquire_saved(ctx);
+	ret = lscsa->event_mask.slot[0];
+	spu_release(ctx);
+	return ret;
+}
+DEFINE_SIMPLE_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
+			spufs_event_mask_set, "%llx\n")
+
+static void spufs_srr0_set(void *data, u64 val)
+{
+	struct spu_context *ctx = data;
+	struct spu_lscsa *lscsa = ctx->csa.lscsa;
+	spu_acquire_saved(ctx);
+	lscsa->srr0.slot[0] = (u32) val;
+	spu_release(ctx);
+}
+
+static u64 spufs_srr0_get(void *data)
+{
+	struct spu_context *ctx = data;
+	struct spu_lscsa *lscsa = ctx->csa.lscsa;
+	u64 ret;
+	spu_acquire_saved(ctx);
+	ret = lscsa->srr0.slot[0];
+	spu_release(ctx);
+	return ret;
+}
+DEFINE_SIMPLE_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
+			"%llx\n")
+
 struct tree_descr spufs_dir_contents[] = {
 	{ "mem",  &spufs_mem_fops,  0666, },
+	{ "regs", &spufs_regs_fops,  0666, },
 	{ "mbox", &spufs_mbox_fops, 0444, },
 	{ "ibox", &spufs_ibox_fops, 0444, },
 	{ "wbox", &spufs_wbox_fops, 0222, },
@@ -592,5 +845,11 @@ struct tree_descr spufs_dir_contents[] = {
592 { "signal1_type", &spufs_signal1_type, 0666, }, 845 { "signal1_type", &spufs_signal1_type, 0666, },
593 { "signal2_type", &spufs_signal2_type, 0666, }, 846 { "signal2_type", &spufs_signal2_type, 0666, },
594 { "npc", &spufs_npc_ops, 0666, }, 847 { "npc", &spufs_npc_ops, 0666, },
848 { "fpcr", &spufs_fpcr_fops, 0666, },
849 { "decr", &spufs_decr_ops, 0666, },
850 { "decr_status", &spufs_decr_status_ops, 0666, },
851 { "spu_tag_mask", &spufs_spu_tag_mask_ops, 0666, },
852 { "event_mask", &spufs_event_mask_ops, 0666, },
853 { "srr0", &spufs_srr0_ops, 0666, },
595 {}, 854 {},
596}; 855};
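
[Editor's note] The tree_descr table above is what gives each context directory its file set; the new register-style files (npc, decr, decr_status, spu_tag_mask, event_mask, srr0) all print and parse a single value with the "%llx\n" format of their DEFINE_SIMPLE_ATTRIBUTE. A minimal userspace sketch of poking one of them, assuming a context directory already exists under a mounted spufs (the /spu/myctx path is hypothetical):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long npc = 0;
		FILE *f = fopen("/spu/myctx/npc", "r+");	/* hypothetical path */

		if (!f) {
			perror("fopen");
			return 1;
		}
		if (fscanf(f, "%llx", &npc) == 1)	/* attribute files use "%llx\n" */
			printf("npc = 0x%llx\n", npc);
		rewind(f);
		fprintf(f, "0");			/* restart at local store address 0 */
		fclose(f);
		return 0;
	}
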
diff --git a/arch/powerpc/platforms/cell/spufs/hw_ops.c b/arch/powerpc/platforms/cell/spufs/hw_ops.c
new file mode 100644
index 00000000000..2e90cae98a8
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/hw_ops.c
@@ -0,0 +1,206 @@
1/* hw_ops.c - query/set operations on active SPU context.
2 *
3 * Copyright (C) IBM 2005
4 * Author: Mark Nutter <mnutter@us.ibm.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2, or (at your option)
9 * any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21#include <linux/config.h>
22#include <linux/module.h>
23#include <linux/errno.h>
24#include <linux/sched.h>
25#include <linux/kernel.h>
26#include <linux/mm.h>
27#include <linux/vmalloc.h>
28#include <linux/smp.h>
29#include <linux/smp_lock.h>
30#include <linux/stddef.h>
31#include <linux/unistd.h>
32
33#include <asm/io.h>
34#include <asm/spu.h>
35#include <asm/spu_csa.h>
36#include <asm/mmu_context.h>
37#include "spufs.h"
38
39static int spu_hw_mbox_read(struct spu_context *ctx, u32 * data)
40{
41 struct spu *spu = ctx->spu;
42 struct spu_problem __iomem *prob = spu->problem;
43 u32 mbox_stat;
44 int ret = 0;
45
46 spin_lock_irq(&spu->register_lock);
47 mbox_stat = in_be32(&prob->mb_stat_R);
48 if (mbox_stat & 0x0000ff) {
49 *data = in_be32(&prob->pu_mb_R);
50 ret = 4;
51 }
52 spin_unlock_irq(&spu->register_lock);
53 return ret;
54}
55
56static u32 spu_hw_mbox_stat_read(struct spu_context *ctx)
57{
58 return in_be32(&ctx->spu->problem->mb_stat_R);
59}
60
61static int spu_hw_ibox_read(struct spu_context *ctx, u32 * data)
62{
63 struct spu *spu = ctx->spu;
64 struct spu_problem __iomem *prob = spu->problem;
65 struct spu_priv1 __iomem *priv1 = spu->priv1;
66 struct spu_priv2 __iomem *priv2 = spu->priv2;
67 int ret;
68
69 spin_lock_irq(&spu->register_lock);
70 if (in_be32(&prob->mb_stat_R) & 0xff0000) {
71 /* read the first available word */
72 *data = in_be64(&priv2->puint_mb_R);
73 ret = 4;
74 } else {
75 /* make sure we get woken up by the interrupt */
76 out_be64(&priv1->int_mask_class2_RW,
77 in_be64(&priv1->int_mask_class2_RW) | 0x1);
78 ret = 0;
79 }
80 spin_unlock_irq(&spu->register_lock);
81 return ret;
82}
83
84static int spu_hw_wbox_write(struct spu_context *ctx, u32 data)
85{
86 struct spu *spu = ctx->spu;
87 struct spu_problem __iomem *prob = spu->problem;
88 struct spu_priv1 __iomem *priv1 = spu->priv1;
89 int ret;
90
91 spin_lock_irq(&spu->register_lock);
92 if (in_be32(&prob->mb_stat_R) & 0x00ff00) {
93 /* we have space to write wbox_data to */
94 out_be32(&prob->spu_mb_W, data);
95 ret = 4;
96 } else {
97 /* make sure we get woken up by the interrupt when space
98 becomes available */
99 out_be64(&priv1->int_mask_class2_RW,
100 in_be64(&priv1->int_mask_class2_RW) | 0x10);
101 ret = 0;
102 }
103 spin_unlock_irq(&spu->register_lock);
104 return ret;
105}
106
107static u32 spu_hw_signal1_read(struct spu_context *ctx)
108{
109 return in_be32(&ctx->spu->problem->signal_notify1);
110}
111
112static void spu_hw_signal1_write(struct spu_context *ctx, u32 data)
113{
114 out_be32(&ctx->spu->problem->signal_notify1, data);
115}
116
117static u32 spu_hw_signal2_read(struct spu_context *ctx)
118{
119	return in_be32(&ctx->spu->problem->signal_notify2);
120}
121
122static void spu_hw_signal2_write(struct spu_context *ctx, u32 data)
123{
124 out_be32(&ctx->spu->problem->signal_notify2, data);
125}
126
127static void spu_hw_signal1_type_set(struct spu_context *ctx, u64 val)
128{
129 struct spu *spu = ctx->spu;
130 struct spu_priv2 __iomem *priv2 = spu->priv2;
131 u64 tmp;
132
133 spin_lock_irq(&spu->register_lock);
134 tmp = in_be64(&priv2->spu_cfg_RW);
135 if (val)
136 tmp |= 1;
137 else
138 tmp &= ~1;
139 out_be64(&priv2->spu_cfg_RW, tmp);
140 spin_unlock_irq(&spu->register_lock);
141}
142
143static u64 spu_hw_signal1_type_get(struct spu_context *ctx)
144{
145 return ((in_be64(&ctx->spu->priv2->spu_cfg_RW) & 1) != 0);
146}
147
148static void spu_hw_signal2_type_set(struct spu_context *ctx, u64 val)
149{
150 struct spu *spu = ctx->spu;
151 struct spu_priv2 __iomem *priv2 = spu->priv2;
152 u64 tmp;
153
154 spin_lock_irq(&spu->register_lock);
155 tmp = in_be64(&priv2->spu_cfg_RW);
156 if (val)
157 tmp |= 2;
158 else
159 tmp &= ~2;
160 out_be64(&priv2->spu_cfg_RW, tmp);
161 spin_unlock_irq(&spu->register_lock);
162}
163
164static u64 spu_hw_signal2_type_get(struct spu_context *ctx)
165{
166 return ((in_be64(&ctx->spu->priv2->spu_cfg_RW) & 2) != 0);
167}
168
169static u32 spu_hw_npc_read(struct spu_context *ctx)
170{
171 return in_be32(&ctx->spu->problem->spu_npc_RW);
172}
173
174static void spu_hw_npc_write(struct spu_context *ctx, u32 val)
175{
176 out_be32(&ctx->spu->problem->spu_npc_RW, val);
177}
178
179static u32 spu_hw_status_read(struct spu_context *ctx)
180{
181 return in_be32(&ctx->spu->problem->spu_status_R);
182}
183
184static char *spu_hw_get_ls(struct spu_context *ctx)
185{
186 return ctx->spu->local_store;
187}
188
189struct spu_context_ops spu_hw_ops = {
190 .mbox_read = spu_hw_mbox_read,
191 .mbox_stat_read = spu_hw_mbox_stat_read,
192 .ibox_read = spu_hw_ibox_read,
193 .wbox_write = spu_hw_wbox_write,
194 .signal1_read = spu_hw_signal1_read,
195 .signal1_write = spu_hw_signal1_write,
196 .signal2_read = spu_hw_signal2_read,
197 .signal2_write = spu_hw_signal2_write,
198 .signal1_type_set = spu_hw_signal1_type_set,
199 .signal1_type_get = spu_hw_signal1_type_get,
200 .signal2_type_set = spu_hw_signal2_type_set,
201 .signal2_type_get = spu_hw_signal2_type_get,
202 .npc_read = spu_hw_npc_read,
203 .npc_write = spu_hw_npc_write,
204 .status_read = spu_hw_status_read,
205 .get_ls = spu_hw_get_ls,
206};
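
[Editor's note] spu_hw_ops is the runnable-state half of the spu_context_ops interface; spu_backing_ops (backing_ops.c, added earlier in this patch) implements the same table against the saved context image. The point of the indirection is that callers never test which state a context is in. A minimal sketch of the dispatch pattern as file.c uses it (the helper name is hypothetical):

	static u32 read_npc_example(struct spu_context *ctx)	/* hypothetical */
	{
		u32 npc;

		spu_acquire(ctx);		/* hold state_sema readable */
		npc = ctx->ops->npc_read(ctx);	/* spu_hw_ops or spu_backing_ops */
		spu_release(ctx);
		return npc;
	}
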
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index f7aa0a6b1ce..2c3ba4eb41c 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -41,24 +41,6 @@
41 41
42static kmem_cache_t *spufs_inode_cache; 42static kmem_cache_t *spufs_inode_cache;
43 43
44/* Information about the backing dev, same as ramfs */
45#if 0
46static struct backing_dev_info spufs_backing_dev_info = {
47 .ra_pages = 0, /* No readahead */
48 .capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK |
49 BDI_CAP_MAP_DIRECT | BDI_CAP_MAP_COPY | BDI_CAP_READ_MAP |
50 BDI_CAP_WRITE_MAP,
51};
52
53static struct address_space_operations spufs_aops = {
54 .readpage = simple_readpage,
55 .prepare_write = simple_prepare_write,
56 .commit_write = simple_commit_write,
57};
58#endif
59
60/* Inode operations */
61
62static struct inode * 44static struct inode *
63spufs_alloc_inode(struct super_block *sb) 45spufs_alloc_inode(struct super_block *sb)
64{ 46{
@@ -111,9 +93,6 @@ spufs_setattr(struct dentry *dentry, struct iattr *attr)
111{ 93{
112 struct inode *inode = dentry->d_inode; 94 struct inode *inode = dentry->d_inode;
113 95
114/* dump_stack();
115 pr_debug("ia_size %lld, i_size:%lld\n", attr->ia_size, inode->i_size);
116*/
117 if ((attr->ia_valid & ATTR_SIZE) && 96 if ((attr->ia_valid & ATTR_SIZE) &&
118 (attr->ia_size != inode->i_size)) 97 (attr->ia_size != inode->i_size))
119 return -EINVAL; 98 return -EINVAL;
@@ -127,9 +106,7 @@ spufs_new_file(struct super_block *sb, struct dentry *dentry,
127 struct spu_context *ctx) 106 struct spu_context *ctx)
128{ 107{
129 static struct inode_operations spufs_file_iops = { 108 static struct inode_operations spufs_file_iops = {
130 .getattr = simple_getattr,
131 .setattr = spufs_setattr, 109 .setattr = spufs_setattr,
132 .unlink = simple_unlink,
133 }; 110 };
134 struct inode *inode; 111 struct inode *inode;
135 int ret; 112 int ret;
@@ -183,21 +160,32 @@ out:
183 160
184static int spufs_rmdir(struct inode *root, struct dentry *dir_dentry) 161static int spufs_rmdir(struct inode *root, struct dentry *dir_dentry)
185{ 162{
186 struct dentry *dentry; 163 struct dentry *dentry, *tmp;
164 struct spu_context *ctx;
187 int err; 165 int err;
188 166
189 spin_lock(&dcache_lock);
190 /* remove all entries */ 167 /* remove all entries */
191 err = 0; 168 err = 0;
192 list_for_each_entry(dentry, &dir_dentry->d_subdirs, d_child) { 169 list_for_each_entry_safe(dentry, tmp, &dir_dentry->d_subdirs, d_child) {
193 if (d_unhashed(dentry) || !dentry->d_inode) 170 spin_lock(&dcache_lock);
194 continue;
195 atomic_dec(&dentry->d_count);
196 spin_lock(&dentry->d_lock); 171 spin_lock(&dentry->d_lock);
197 __d_drop(dentry); 172 if (!(d_unhashed(dentry)) && dentry->d_inode) {
198 spin_unlock(&dentry->d_lock); 173 dget_locked(dentry);
174 __d_drop(dentry);
175 spin_unlock(&dentry->d_lock);
176 simple_unlink(dir_dentry->d_inode, dentry);
177 spin_unlock(&dcache_lock);
178 dput(dentry);
179 } else {
180 spin_unlock(&dentry->d_lock);
181 spin_unlock(&dcache_lock);
182 }
199 } 183 }
200 spin_unlock(&dcache_lock); 184
185 /* We have to give up the mm_struct */
186 ctx = SPUFS_I(dir_dentry->d_inode)->i_ctx;
187 spu_forget(ctx);
188
201 if (!err) { 189 if (!err) {
202 shrink_dcache_parent(dir_dentry); 190 shrink_dcache_parent(dir_dentry);
203 err = simple_rmdir(root, dir_dentry); 191 err = simple_rmdir(root, dir_dentry);
@@ -249,7 +237,7 @@ spufs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
249 inode->i_gid = dir->i_gid; 237 inode->i_gid = dir->i_gid;
250 inode->i_mode &= S_ISGID; 238 inode->i_mode &= S_ISGID;
251 } 239 }
252 ctx = alloc_spu_context(); 240 ctx = alloc_spu_context(inode->i_mapping);
253 SPUFS_I(inode)->i_ctx = ctx; 241 SPUFS_I(inode)->i_ctx = ctx;
254 if (!ctx) 242 if (!ctx)
255 goto out_iput; 243 goto out_iput;
@@ -368,7 +356,8 @@ spufs_parse_options(char *options, struct inode *root)
368} 356}
369 357
370static int 358static int
371spufs_create_root(struct super_block *sb, void *data) { 359spufs_create_root(struct super_block *sb, void *data)
360{
372 struct inode *inode; 361 struct inode *inode;
373 int ret; 362 int ret;
374 363
@@ -441,6 +430,10 @@ static int spufs_init(void)
441 430
442 if (!spufs_inode_cache) 431 if (!spufs_inode_cache)
443 goto out; 432 goto out;
433 if (spu_sched_init() != 0) {
434 kmem_cache_destroy(spufs_inode_cache);
435 goto out;
436 }
444 ret = register_filesystem(&spufs_type); 437 ret = register_filesystem(&spufs_type);
445 if (ret) 438 if (ret)
446 goto out_cache; 439 goto out_cache;
@@ -459,6 +452,7 @@ module_init(spufs_init);
459 452
460static void spufs_exit(void) 453static void spufs_exit(void)
461{ 454{
455 spu_sched_exit();
462 unregister_spu_syscalls(&spufs_calls); 456 unregister_spu_syscalls(&spufs_calls);
463 unregister_filesystem(&spufs_type); 457 unregister_filesystem(&spufs_type);
464 kmem_cache_destroy(spufs_inode_cache); 458 kmem_cache_destroy(spufs_inode_cache);
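
[Editor's note] spufs_rmdir() now calls spu_forget() so the context drops its reference on the owning mm_struct before the directory goes away. spu_forget() itself lives in context.c, which is not part of this hunk; the following is only a plausible sketch consistent with the declarations in spufs.h and the scheduler entry points, not the actual implementation:

	/* Hedged sketch, not the real context.c code. */
	void spu_forget_sketch(struct spu_context *ctx)
	{
		struct mm_struct *mm;

		down_write(&ctx->state_sema);
		spu_deactivate(ctx);	/* save the context, free the SPU */
		mm = ctx->owner;
		ctx->owner = NULL;
		up_write(&ctx->state_sema);
		mmput(mm);		/* give up the mm_struct reference */
	}
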
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
new file mode 100644
index 00000000000..c0d9d83a9ac
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -0,0 +1,419 @@
1/* sched.c - SPU scheduler.
2 *
3 * Copyright (C) IBM 2005
4 * Author: Mark Nutter <mnutter@us.ibm.com>
5 *
6 * SPU scheduler, based on Linux thread priority. For now use
7 * a simple "cooperative" yield model with no preemption. SPU
8 * scheduling will eventually be preemptive: When a thread with
9 * a higher static priority becomes ready to run, an active SPU
10 * context will be preempted and returned to the waitq.
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27#define DEBUG 1
28#include <linux/config.h>
29#include <linux/module.h>
30#include <linux/errno.h>
31#include <linux/sched.h>
32#include <linux/kernel.h>
33#include <linux/mm.h>
34#include <linux/completion.h>
35#include <linux/vmalloc.h>
36#include <linux/smp.h>
37#include <linux/smp_lock.h>
38#include <linux/stddef.h>
39#include <linux/unistd.h>
40
41#include <asm/io.h>
42#include <asm/mmu_context.h>
43#include <asm/spu.h>
44#include <asm/spu_csa.h>
45#include "spufs.h"
46
47#define SPU_BITMAP_SIZE (((MAX_PRIO+BITS_PER_LONG)/BITS_PER_LONG)+1)
48struct spu_prio_array {
49 atomic_t nr_blocked;
50 unsigned long bitmap[SPU_BITMAP_SIZE];
51 wait_queue_head_t waitq[MAX_PRIO];
52};
53
54/* spu_runqueue - This is the main runqueue data structure for SPUs. */
55struct spu_runqueue {
56 struct semaphore sem;
57 unsigned long nr_active;
58 unsigned long nr_idle;
59 unsigned long nr_switches;
60 struct list_head active_list;
61 struct list_head idle_list;
62 struct spu_prio_array prio;
63};
64
65static struct spu_runqueue *spu_runqueues = NULL;
66
67static inline struct spu_runqueue *spu_rq(void)
68{
69 /* Future: make this a per-NODE array,
70 * and use cpu_to_node(smp_processor_id())
71 */
72 return spu_runqueues;
73}
74
75static inline struct spu *del_idle(struct spu_runqueue *rq)
76{
77 struct spu *spu;
78
79 BUG_ON(rq->nr_idle <= 0);
80 BUG_ON(list_empty(&rq->idle_list));
81 /* Future: Move SPU out of low-power SRI state. */
82 spu = list_entry(rq->idle_list.next, struct spu, sched_list);
83 list_del_init(&spu->sched_list);
84 rq->nr_idle--;
85 return spu;
86}
87
88static inline void del_active(struct spu_runqueue *rq, struct spu *spu)
89{
90 BUG_ON(rq->nr_active <= 0);
91 BUG_ON(list_empty(&rq->active_list));
92 list_del_init(&spu->sched_list);
93 rq->nr_active--;
94}
95
96static inline void add_idle(struct spu_runqueue *rq, struct spu *spu)
97{
98 /* Future: Put SPU into low-power SRI state. */
99 list_add_tail(&spu->sched_list, &rq->idle_list);
100 rq->nr_idle++;
101}
102
103static inline void add_active(struct spu_runqueue *rq, struct spu *spu)
104{
105 rq->nr_active++;
106 rq->nr_switches++;
107 list_add_tail(&spu->sched_list, &rq->active_list);
108}
109
110static void prio_wakeup(struct spu_runqueue *rq)
111{
112 if (atomic_read(&rq->prio.nr_blocked) && rq->nr_idle) {
113 int best = sched_find_first_bit(rq->prio.bitmap);
114 if (best < MAX_PRIO) {
115 wait_queue_head_t *wq = &rq->prio.waitq[best];
116 wake_up_interruptible_nr(wq, 1);
117 }
118 }
119}
120
121static void prio_wait(struct spu_runqueue *rq, u64 flags)
122{
123 int prio = current->prio;
124 wait_queue_head_t *wq = &rq->prio.waitq[prio];
125 DEFINE_WAIT(wait);
126
127 __set_bit(prio, rq->prio.bitmap);
128 atomic_inc(&rq->prio.nr_blocked);
129 prepare_to_wait_exclusive(wq, &wait, TASK_INTERRUPTIBLE);
130 if (!signal_pending(current)) {
131 up(&rq->sem);
132 pr_debug("%s: pid=%d prio=%d\n", __FUNCTION__,
133 current->pid, current->prio);
134 schedule();
135 down(&rq->sem);
136 }
137 finish_wait(wq, &wait);
138 atomic_dec(&rq->prio.nr_blocked);
139 if (!waitqueue_active(wq))
140 __clear_bit(prio, rq->prio.bitmap);
141}
142
143static inline int is_best_prio(struct spu_runqueue *rq)
144{
145 int best_prio;
146
147 best_prio = sched_find_first_bit(rq->prio.bitmap);
148 return (current->prio < best_prio) ? 1 : 0;
149}
150
151static inline void mm_needs_global_tlbie(struct mm_struct *mm)
152{
153 /* Global TLBIE broadcast required with SPEs. */
154#if (NR_CPUS > 1)
155 __cpus_setall(&mm->cpu_vm_mask, NR_CPUS);
156#else
157 __cpus_setall(&mm->cpu_vm_mask, NR_CPUS+1); /* is this ok? */
158#endif
159}
160
161static inline void bind_context(struct spu *spu, struct spu_context *ctx)
162{
163 pr_debug("%s: pid=%d SPU=%d\n", __FUNCTION__, current->pid,
164 spu->number);
165 spu->ctx = ctx;
166 spu->flags = 0;
167 ctx->spu = spu;
168 ctx->ops = &spu_hw_ops;
169 spu->pid = current->pid;
170 spu->prio = current->prio;
171 spu->mm = ctx->owner;
172 mm_needs_global_tlbie(spu->mm);
173 spu->ibox_callback = spufs_ibox_callback;
174 spu->wbox_callback = spufs_wbox_callback;
175 mb();
176 spu_restore(&ctx->csa, spu);
177}
178
179static inline void unbind_context(struct spu *spu, struct spu_context *ctx)
180{
181 pr_debug("%s: unbind pid=%d SPU=%d\n", __FUNCTION__,
182 spu->pid, spu->number);
183 spu_save(&ctx->csa, spu);
184 ctx->state = SPU_STATE_SAVED;
185 spu->ibox_callback = NULL;
186 spu->wbox_callback = NULL;
187 spu->mm = NULL;
188 spu->pid = 0;
189 spu->prio = MAX_PRIO;
190 ctx->ops = &spu_backing_ops;
191 ctx->spu = NULL;
192 spu->ctx = NULL;
193}
194
195static struct spu *preempt_active(struct spu_runqueue *rq)
196{
197 struct list_head *p;
198 struct spu_context *ctx;
199 struct spu *spu;
200
201 /* Future: implement real preemption. For now just
202 * boot a lower priority ctx that is in "detached"
203 * state, i.e. on a processor but not currently in
204 * spu_run().
205 */
206 list_for_each(p, &rq->active_list) {
207 spu = list_entry(p, struct spu, sched_list);
208 if (current->prio < spu->prio) {
209 ctx = spu->ctx;
210 if (down_write_trylock(&ctx->state_sema)) {
211 if (ctx->state != SPU_STATE_RUNNABLE) {
212 up_write(&ctx->state_sema);
213 continue;
214 }
215 pr_debug("%s: booting pid=%d from SPU %d\n",
216 __FUNCTION__, spu->pid, spu->number);
217 del_active(rq, spu);
218 up(&rq->sem);
219 unbind_context(spu, ctx);
220 up_write(&ctx->state_sema);
221 return spu;
222 }
223 }
224 }
225 return NULL;
226}
227
228static struct spu *get_idle_spu(u64 flags)
229{
230 struct spu_runqueue *rq;
231 struct spu *spu = NULL;
232
233 rq = spu_rq();
234 down(&rq->sem);
235 for (;;) {
236 if (rq->nr_idle > 0) {
237 if (is_best_prio(rq)) {
238 /* Fall through. */
239 spu = del_idle(rq);
240 break;
241 } else {
242 prio_wakeup(rq);
243 up(&rq->sem);
244 yield();
245 if (signal_pending(current)) {
246 return NULL;
247 }
248 rq = spu_rq();
249 down(&rq->sem);
250 continue;
251 }
252 } else {
253 if (is_best_prio(rq)) {
254 if ((spu = preempt_active(rq)) != NULL)
255 return spu;
256 }
257 prio_wait(rq, flags);
258 if (signal_pending(current)) {
259 prio_wakeup(rq);
260 spu = NULL;
261 break;
262 }
263 continue;
264 }
265 }
266 up(&rq->sem);
267 return spu;
268}
269
270static void put_idle_spu(struct spu *spu)
271{
272 struct spu_runqueue *rq = spu->rq;
273
274 down(&rq->sem);
275 add_idle(rq, spu);
276 prio_wakeup(rq);
277 up(&rq->sem);
278}
279
280static int get_active_spu(struct spu *spu)
281{
282 struct spu_runqueue *rq = spu->rq;
283 struct list_head *p;
284 struct spu *tmp;
285 int rc = 0;
286
287 down(&rq->sem);
288 list_for_each(p, &rq->active_list) {
289 tmp = list_entry(p, struct spu, sched_list);
290 if (tmp == spu) {
291 del_active(rq, spu);
292 rc = 1;
293 break;
294 }
295 }
296 up(&rq->sem);
297 return rc;
298}
299
300static void put_active_spu(struct spu *spu)
301{
302 struct spu_runqueue *rq = spu->rq;
303
304 down(&rq->sem);
305 add_active(rq, spu);
306 up(&rq->sem);
307}
308
309/* Lock order:
310 * spu_activate() & spu_deactivate() require the
311 * caller to have down_write(&ctx->state_sema).
312 *
313 * The rq->sem is briefly held (inside or outside a
314 * given ctx lock) for list management, but is never
315 * held during save/restore.
316 */
317
318int spu_activate(struct spu_context *ctx, u64 flags)
319{
320 struct spu *spu;
321
322 if (ctx->spu)
323 return 0;
324 spu = get_idle_spu(flags);
325 if (!spu)
326 return (signal_pending(current)) ? -ERESTARTSYS : -EAGAIN;
327 bind_context(spu, ctx);
328 put_active_spu(spu);
329 return 0;
330}
331
332void spu_deactivate(struct spu_context *ctx)
333{
334 struct spu *spu;
335 int needs_idle;
336
337 spu = ctx->spu;
338 if (!spu)
339 return;
340 needs_idle = get_active_spu(spu);
341 unbind_context(spu, ctx);
342 if (needs_idle)
343 put_idle_spu(spu);
344}
345
346void spu_yield(struct spu_context *ctx)
347{
348 struct spu *spu;
349
350 if (!down_write_trylock(&ctx->state_sema))
351 return;
352 spu = ctx->spu;
353 if ((ctx->state == SPU_STATE_RUNNABLE) &&
354 (sched_find_first_bit(spu->rq->prio.bitmap) <= current->prio)) {
355 pr_debug("%s: yielding SPU %d\n", __FUNCTION__, spu->number);
356 spu_deactivate(ctx);
357 ctx->state = SPU_STATE_SAVED;
358 }
359 up_write(&ctx->state_sema);
360}
361
362int __init spu_sched_init(void)
363{
364 struct spu_runqueue *rq;
365 struct spu *spu;
366 int i;
367
368 rq = spu_runqueues = kmalloc(sizeof(struct spu_runqueue), GFP_KERNEL);
369 if (!rq) {
370 printk(KERN_WARNING "%s: Unable to allocate runqueues.\n",
371 __FUNCTION__);
372 return 1;
373 }
374 memset(rq, 0, sizeof(struct spu_runqueue));
375 init_MUTEX(&rq->sem);
376 INIT_LIST_HEAD(&rq->active_list);
377 INIT_LIST_HEAD(&rq->idle_list);
378 rq->nr_active = 0;
379 rq->nr_idle = 0;
380 rq->nr_switches = 0;
381 atomic_set(&rq->prio.nr_blocked, 0);
382 for (i = 0; i < MAX_PRIO; i++) {
383 init_waitqueue_head(&rq->prio.waitq[i]);
384 __clear_bit(i, rq->prio.bitmap);
385 }
386 __set_bit(MAX_PRIO, rq->prio.bitmap);
387 for (;;) {
388 spu = spu_alloc();
389 if (!spu)
390 break;
391 pr_debug("%s: adding SPU[%d]\n", __FUNCTION__, spu->number);
392 add_idle(rq, spu);
393 spu->rq = rq;
394 }
395 if (!rq->nr_idle) {
396 printk(KERN_WARNING "%s: No available SPUs.\n", __FUNCTION__);
397 kfree(rq);
398 return 1;
399 }
400 return 0;
401}
402
403void __exit spu_sched_exit(void)
404{
405 struct spu_runqueue *rq = spu_rq();
406 struct spu *spu;
407
408 if (!rq) {
409 printk(KERN_WARNING "%s: no runqueues!\n", __FUNCTION__);
410 return;
411 }
412 while (rq->nr_idle > 0) {
413 spu = del_idle(rq);
414 if (!spu)
415 break;
416 spu_free(spu);
417 }
418 kfree(rq);
419}
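
[Editor's note] The lock-order comment above is the contract for the spufs entry points: spu_activate()/spu_deactivate() run under down_write(&ctx->state_sema), and rq->sem is only taken briefly around list manipulation. A sketch of how spu_acquire_runnable() (declared in spufs.h, implemented in context.c outside this hunk) would be expected to honor that contract; the function body here is an assumption:

	/* Hedged sketch of a caller obeying the documented lock order. */
	int spu_acquire_runnable_sketch(struct spu_context *ctx)
	{
		int ret = 0;

		down_write(&ctx->state_sema);
		if (ctx->state == SPU_STATE_SAVED)
			ret = spu_activate(ctx, 0);	/* may sleep on rq->sem */
		if (ret)
			up_write(&ctx->state_sema);
		else
			downgrade_write(&ctx->state_sema);	/* keep it readable */
		return ret;
	}
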
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 67aff57faf6..93c6a053756 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -35,15 +35,50 @@ enum {
35 SPUFS_MAGIC = 0x23c9b64e, 35 SPUFS_MAGIC = 0x23c9b64e,
36}; 36};
37 37
38struct spu_context_ops;
39
38struct spu_context { 40struct spu_context {
39 struct spu *spu; /* pointer to a physical SPU */ 41 struct spu *spu; /* pointer to a physical SPU */
40 struct spu_state csa; /* SPU context save area. */ 42 struct spu_state csa; /* SPU context save area. */
41 struct rw_semaphore backing_sema; /* protects the above */
42 spinlock_t mmio_lock; /* protects mmio access */ 43 spinlock_t mmio_lock; /* protects mmio access */
44 struct address_space *local_store;/* local store backing store */
45
46 enum { SPU_STATE_RUNNABLE, SPU_STATE_SAVED } state;
47 struct rw_semaphore state_sema;
48
49 struct mm_struct *owner;
43 50
44 struct kref kref; 51 struct kref kref;
52 wait_queue_head_t ibox_wq;
53 wait_queue_head_t wbox_wq;
54 struct fasync_struct *ibox_fasync;
55 struct fasync_struct *wbox_fasync;
56 struct spu_context_ops *ops;
57};
58
59/* SPU context query/set operations. */
60struct spu_context_ops {
61 int (*mbox_read) (struct spu_context * ctx, u32 * data);
62 u32(*mbox_stat_read) (struct spu_context * ctx);
63 int (*ibox_read) (struct spu_context * ctx, u32 * data);
64 int (*wbox_write) (struct spu_context * ctx, u32 data);
65 u32(*signal1_read) (struct spu_context * ctx);
66 void (*signal1_write) (struct spu_context * ctx, u32 data);
67 u32(*signal2_read) (struct spu_context * ctx);
68 void (*signal2_write) (struct spu_context * ctx, u32 data);
69 void (*signal1_type_set) (struct spu_context * ctx, u64 val);
70 u64(*signal1_type_get) (struct spu_context * ctx);
71 void (*signal2_type_set) (struct spu_context * ctx, u64 val);
72 u64(*signal2_type_get) (struct spu_context * ctx);
73 u32(*npc_read) (struct spu_context * ctx);
74 void (*npc_write) (struct spu_context * ctx, u32 data);
75 u32(*status_read) (struct spu_context * ctx);
76 char*(*get_ls) (struct spu_context * ctx);
45}; 77};
46 78
79extern struct spu_context_ops spu_hw_ops;
80extern struct spu_context_ops spu_backing_ops;
81
47struct spufs_inode_info { 82struct spufs_inode_info {
48 struct spu_context *i_ctx; 83 struct spu_context *i_ctx;
49 struct inode vfs_inode; 84 struct inode vfs_inode;
@@ -60,14 +95,28 @@ long spufs_create_thread(struct nameidata *nd, const char *name,
60 unsigned int flags, mode_t mode); 95 unsigned int flags, mode_t mode);
61 96
62/* context management */ 97/* context management */
63struct spu_context * alloc_spu_context(void); 98struct spu_context * alloc_spu_context(struct address_space *local_store);
64void destroy_spu_context(struct kref *kref); 99void destroy_spu_context(struct kref *kref);
65struct spu_context * get_spu_context(struct spu_context *ctx); 100struct spu_context * get_spu_context(struct spu_context *ctx);
66int put_spu_context(struct spu_context *ctx); 101int put_spu_context(struct spu_context *ctx);
67 102
103void spu_forget(struct spu_context *ctx);
68void spu_acquire(struct spu_context *ctx); 104void spu_acquire(struct spu_context *ctx);
69void spu_release(struct spu_context *ctx); 105void spu_release(struct spu_context *ctx);
70void spu_acquire_runnable(struct spu_context *ctx); 106int spu_acquire_runnable(struct spu_context *ctx);
71void spu_acquire_saved(struct spu_context *ctx); 107void spu_acquire_saved(struct spu_context *ctx);
72 108
109int spu_activate(struct spu_context *ctx, u64 flags);
110void spu_deactivate(struct spu_context *ctx);
111void spu_yield(struct spu_context *ctx);
112int __init spu_sched_init(void);
113void __exit spu_sched_exit(void);
114
115size_t spu_wbox_write(struct spu_context *ctx, u32 data);
116size_t spu_ibox_read(struct spu_context *ctx, u32 *data);
117
118/* irq callback funcs. */
119void spufs_ibox_callback(struct spu *spu);
120void spufs_wbox_callback(struct spu *spu);
121
73#endif 122#endif
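
[Editor's note] Given the new fields, a freshly allocated context must start in SPU_STATE_SAVED with ctx->ops pointing at spu_backing_ops, since it has no physical SPU until spu_activate() binds one. alloc_spu_context() is implemented in context.c (not shown here); a sketch consistent with this header and with bind_context()/unbind_context() in sched.c, assuming 2.6.15-era kzalloc/kref_init APIs:

	/* Hedged sketch, not the real context.c code. */
	struct spu_context *alloc_spu_context_sketch(struct address_space *local_store)
	{
		struct spu_context *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

		if (!ctx)
			return NULL;
		spu_init_csa(&ctx->csa);
		spin_lock_init(&ctx->mmio_lock);
		init_rwsem(&ctx->state_sema);
		kref_init(&ctx->kref);
		init_waitqueue_head(&ctx->ibox_wq);
		init_waitqueue_head(&ctx->wbox_wq);
		ctx->local_store = local_store;
		ctx->owner = get_task_mm(current);
		ctx->state = SPU_STATE_SAVED;	/* no physical SPU yet */
		ctx->ops = &spu_backing_ops;	/* saved-state accessors */
		return ctx;
	}
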
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index 70345b0524f..51266257b0a 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -646,7 +646,7 @@ static inline void save_spu_mb(struct spu_state *csa, struct spu *spu)
646 eieio(); 646 eieio();
647 csa->spu_chnlcnt_RW[29] = in_be64(&priv2->spu_chnlcnt_RW); 647 csa->spu_chnlcnt_RW[29] = in_be64(&priv2->spu_chnlcnt_RW);
648 for (i = 0; i < 4; i++) { 648 for (i = 0; i < 4; i++) {
649 csa->pu_mailbox_data[i] = in_be64(&priv2->spu_chnldata_RW); 649 csa->spu_mailbox_data[i] = in_be64(&priv2->spu_chnldata_RW);
650 } 650 }
651 out_be64(&priv2->spu_chnlcnt_RW, 0UL); 651 out_be64(&priv2->spu_chnlcnt_RW, 0UL);
652 eieio(); 652 eieio();
@@ -1667,7 +1667,7 @@ static inline void restore_spu_mb(struct spu_state *csa, struct spu *spu)
1667 eieio(); 1667 eieio();
1668 out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[29]); 1668 out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[29]);
1669 for (i = 0; i < 4; i++) { 1669 for (i = 0; i < 4; i++) {
1670 out_be64(&priv2->spu_chnldata_RW, csa->pu_mailbox_data[i]); 1670 out_be64(&priv2->spu_chnldata_RW, csa->spu_mailbox_data[i]);
1671 } 1671 }
1672 eieio(); 1672 eieio();
1673} 1673}
@@ -2079,7 +2079,10 @@ int spu_save(struct spu_state *prev, struct spu *spu)
2079 acquire_spu_lock(spu); /* Step 1. */ 2079 acquire_spu_lock(spu); /* Step 1. */
2080 rc = __do_spu_save(prev, spu); /* Steps 2-53. */ 2080 rc = __do_spu_save(prev, spu); /* Steps 2-53. */
2081 release_spu_lock(spu); 2081 release_spu_lock(spu);
2082 2082 if (rc) {
2083 panic("%s failed on SPU[%d], rc=%d.\n",
2084 __func__, spu->number, rc);
2085 }
2083 return rc; 2086 return rc;
2084} 2087}
2085 2088
@@ -2098,34 +2101,31 @@ int spu_restore(struct spu_state *new, struct spu *spu)
2098 2101
2099 acquire_spu_lock(spu); 2102 acquire_spu_lock(spu);
2100 harvest(NULL, spu); 2103 harvest(NULL, spu);
2104 spu->stop_code = 0;
2105 spu->dar = 0;
2106 spu->dsisr = 0;
2107 spu->slb_replace = 0;
2108 spu->class_0_pending = 0;
2101 rc = __do_spu_restore(new, spu); 2109 rc = __do_spu_restore(new, spu);
2102 release_spu_lock(spu); 2110 release_spu_lock(spu);
2103 2111 if (rc) {
2112 panic("%s failed on SPU[%d] rc=%d.\n",
2113 __func__, spu->number, rc);
2114 }
2104 return rc; 2115 return rc;
2105} 2116}
2106 2117
2107/** 2118/**
2108 * spu_switch - SPU context switch (save + restore). 2119 * spu_harvest - SPU harvest (reset) operation
2109 * @prev: pointer to SPU context save area, to be saved.
2110 * @new: pointer to SPU context save area, to be restored.
2111 * @spu: pointer to SPU iomem structure. 2120 * @spu: pointer to SPU iomem structure.
2112 * 2121 *
2113 * Perform save, then restore. Only harvest if the 2122 * Perform SPU harvest (reset) operation.
2114 * save fails, as cleanup is otherwise not needed.
2115 */ 2123 */
2116int spu_switch(struct spu_state *prev, struct spu_state *new, struct spu *spu) 2124void spu_harvest(struct spu *spu)
2117{ 2125{
2118 int rc; 2126 acquire_spu_lock(spu);
2119 2127 harvest(NULL, spu);
2120 acquire_spu_lock(spu); /* Save, Step 1. */
2121 rc = __do_spu_save(prev, spu); /* Save, Steps 2-53. */
2122 if (rc != 0) {
2123 harvest(prev, spu);
2124 }
2125 rc = __do_spu_restore(new, spu);
2126 release_spu_lock(spu); 2128 release_spu_lock(spu);
2127
2128 return rc;
2129} 2129}
2130 2130
2131static void init_prob(struct spu_state *csa) 2131static void init_prob(struct spu_state *csa)
@@ -2181,6 +2181,7 @@ static void init_priv2(struct spu_state *csa)
2181void spu_init_csa(struct spu_state *csa) 2181void spu_init_csa(struct spu_state *csa)
2182{ 2182{
2183 struct spu_lscsa *lscsa; 2183 struct spu_lscsa *lscsa;
2184 unsigned char *p;
2184 2185
2185 if (!csa) 2186 if (!csa)
2186 return; 2187 return;
@@ -2192,6 +2193,11 @@ void spu_init_csa(struct spu_state *csa)
2192 2193
2193 memset(lscsa, 0, sizeof(struct spu_lscsa)); 2194 memset(lscsa, 0, sizeof(struct spu_lscsa));
2194 csa->lscsa = lscsa; 2195 csa->lscsa = lscsa;
2196 csa->register_lock = SPIN_LOCK_UNLOCKED;
2197
2198 /* Set LS pages reserved to allow for user-space mapping. */
2199 for (p = lscsa->ls; p < lscsa->ls + LS_SIZE; p += PAGE_SIZE)
2200 SetPageReserved(vmalloc_to_page(p));
2195 2201
2196 init_prob(csa); 2202 init_prob(csa);
2197 init_priv1(csa); 2203 init_priv1(csa);
@@ -2200,5 +2206,10 @@ void spu_init_csa(struct spu_state *csa)
2200 2206
2201void spu_fini_csa(struct spu_state *csa) 2207void spu_fini_csa(struct spu_state *csa)
2202{ 2208{
2209 /* Clear reserved bit before vfree. */
2210 unsigned char *p;
2211 for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
2212 ClearPageReserved(vmalloc_to_page(p));
2213
2203 vfree(csa->lscsa); 2214 vfree(csa->lscsa);
2204} 2215}
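
[Editor's note] The SetPageReserved()/ClearPageReserved() pair in spu_init_csa()/spu_fini_csa() is what allows the vmalloc'ed local-store image to be mapped into user space while the context is saved: on kernels of this vintage, pages inserted into a user mapping had to be marked reserved. The same pattern in isolation, as a hedged stand-alone helper (name hypothetical):

	#include <linux/mm.h>
	#include <linux/vmalloc.h>

	/* Hedged sketch: vmalloc a buffer whose pages may later be
	 * handed to user space, marking each backing page reserved. */
	static void *alloc_user_mappable(unsigned long size)
	{
		unsigned char *buf = vmalloc(size);
		unsigned char *p;

		if (!buf)
			return NULL;
		memset(buf, 0, size);
		for (p = buf; p < buf + size; p += PAGE_SIZE)
			SetPageReserved(vmalloc_to_page(p));
		return buf;
	}
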
diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c
index 3f71bb5e9d8..17a2b51c94b 100644
--- a/arch/powerpc/platforms/cell/spufs/syscalls.c
+++ b/arch/powerpc/platforms/cell/spufs/syscalls.c
@@ -36,7 +36,7 @@ long do_spu_run(struct file *filp, __u32 __user *unpc, __u32 __user *ustatus)
36 u32 npc, status; 36 u32 npc, status;
37 37
38 ret = -EFAULT; 38 ret = -EFAULT;
39 if (get_user(npc, unpc)) 39 if (get_user(npc, unpc) || get_user(status, ustatus))
40 goto out; 40 goto out;
41 41
42 ret = -EINVAL; 42 ret = -EINVAL;
@@ -46,13 +46,7 @@ long do_spu_run(struct file *filp, __u32 __user *unpc, __u32 __user *ustatus)
46 i = SPUFS_I(filp->f_dentry->d_inode); 46 i = SPUFS_I(filp->f_dentry->d_inode);
47 ret = spufs_run_spu(filp, i->i_ctx, &npc, &status); 47 ret = spufs_run_spu(filp, i->i_ctx, &npc, &status);
48 48
49 if (ret ==-EAGAIN || ret == -EIO) 49 if (put_user(npc, unpc) || put_user(status, ustatus))
50 ret = status;
51
52 if (put_user(npc, unpc))
53 ret = -EFAULT;
54
55 if (ustatus && put_user(status, ustatus))
56 ret = -EFAULT; 50 ret = -EFAULT;
57out: 51out:
58 return ret; 52 return ret;
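
[Editor's note] With the -EAGAIN/-EIO special case gone, do_spu_run() now unconditionally copies npc and status back to user space, so ustatus may no longer be NULL. A userspace sketch of driving the syscall pair; the syscall numbers and the /spu/myctx path are assumptions:

	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	#ifndef __NR_spu_create
	#define __NR_spu_create 279	/* assumed powerpc numbers */
	#endif
	#ifndef __NR_spu_run
	#define __NR_spu_run 278
	#endif

	int main(void)
	{
		unsigned int npc = 0, status = 0;
		long ret;
		int fd = syscall(__NR_spu_create, "/spu/myctx", 0, 0755);

		if (fd < 0) {
			perror("spu_create");
			return 1;
		}
		/* load a program into /spu/myctx/mem first (not shown) */
		ret = syscall(__NR_spu_run, fd, &npc, &status);
		printf("ret=%ld npc=0x%x status=0x%x\n", ret, npc, status);
		return 0;
	}
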
diff --git a/include/asm-powerpc/spu.h b/include/asm-powerpc/spu.h
index 62718f3ba03..092ec97be32 100644
--- a/include/asm-powerpc/spu.h
+++ b/include/asm-powerpc/spu.h
@@ -105,6 +105,9 @@
105#define SPU_CONTEXT_SWITCH_PENDING (1UL << SPU_CONTEXT_SWITCH_PENDING_nr) 105#define SPU_CONTEXT_SWITCH_PENDING (1UL << SPU_CONTEXT_SWITCH_PENDING_nr)
106#define SPU_CONTEXT_SWITCH_ACTIVE (1UL << SPU_CONTEXT_SWITCH_ACTIVE_nr) 106#define SPU_CONTEXT_SWITCH_ACTIVE (1UL << SPU_CONTEXT_SWITCH_ACTIVE_nr)
107 107
108struct spu_context;
109struct spu_runqueue;
110
108struct spu { 111struct spu {
109 char *name; 112 char *name;
110 unsigned long local_store_phys; 113 unsigned long local_store_phys;
@@ -113,23 +116,28 @@ struct spu {
113 struct spu_priv1 __iomem *priv1; 116 struct spu_priv1 __iomem *priv1;
114 struct spu_priv2 __iomem *priv2; 117 struct spu_priv2 __iomem *priv2;
115 struct list_head list; 118 struct list_head list;
119 struct list_head sched_list;
116 int number; 120 int number;
117 u32 isrc; 121 u32 isrc;
118 u32 node; 122 u32 node;
119 u64 flags; 123 u64 flags;
124 u64 dar;
125 u64 dsisr;
120 struct kref kref; 126 struct kref kref;
121 size_t ls_size; 127 size_t ls_size;
122 unsigned int slb_replace; 128 unsigned int slb_replace;
123 struct mm_struct *mm; 129 struct mm_struct *mm;
130 struct spu_context *ctx;
131 struct spu_runqueue *rq;
132 pid_t pid;
133 int prio;
124 int class_0_pending; 134 int class_0_pending;
125 spinlock_t register_lock; 135 spinlock_t register_lock;
126 136
127 u32 stop_code; 137 u32 stop_code;
128 wait_queue_head_t stop_wq; 138 wait_queue_head_t stop_wq;
129 wait_queue_head_t ibox_wq; 139 void (* wbox_callback)(struct spu *spu);
130 wait_queue_head_t wbox_wq; 140 void (* ibox_callback)(struct spu *spu);
131 struct fasync_struct *ibox_fasync;
132 struct fasync_struct *wbox_fasync;
133 141
134 char irq_c0[8]; 142 char irq_c0[8];
135 char irq_c1[8]; 143 char irq_c1[8];
@@ -140,9 +148,6 @@ struct spu *spu_alloc(void);
140void spu_free(struct spu *spu); 148void spu_free(struct spu *spu);
141int spu_run(struct spu *spu); 149int spu_run(struct spu *spu);
142 150
143size_t spu_wbox_write(struct spu *spu, u32 data);
144size_t spu_ibox_read(struct spu *spu, u32 *data);
145
146extern struct spufs_calls { 151extern struct spufs_calls {
147 asmlinkage long (*create_thread)(const char __user *name, 152 asmlinkage long (*create_thread)(const char __user *name,
148 unsigned int flags, mode_t mode); 153 unsigned int flags, mode_t mode);
diff --git a/include/asm-powerpc/spu_csa.h b/include/asm-powerpc/spu_csa.h
index 989a0688144..2a8af416638 100644
--- a/include/asm-powerpc/spu_csa.h
+++ b/include/asm-powerpc/spu_csa.h
@@ -241,6 +241,7 @@ struct spu_state {
241 unsigned long suspend_time; 241 unsigned long suspend_time;
242 u64 slb_esid_RW[8]; 242 u64 slb_esid_RW[8];
243 u64 slb_vsid_RW[8]; 243 u64 slb_vsid_RW[8];
244 spinlock_t register_lock;
244}; 245};
245 246
246extern void spu_init_csa(struct spu_state *csa); 247extern void spu_init_csa(struct spu_state *csa);