Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig                          |   1
-rw-r--r--  arch/powerpc/kernel/systbl.S                  |   2
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c               |   1
-rw-r--r--  arch/powerpc/platforms/cell/Kconfig           |  13
-rw-r--r--  arch/powerpc/platforms/cell/Makefile          |   3
-rw-r--r--  arch/powerpc/platforms/cell/spu_base.c        | 740
-rw-r--r--  arch/powerpc/platforms/cell/spu_syscalls.c    |  86
-rw-r--r--  arch/powerpc/platforms/cell/spufs/Makefile    |   3
-rw-r--r--  arch/powerpc/platforms/cell/spufs/context.c   |  67
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c      | 596
-rw-r--r--  arch/powerpc/platforms/cell/spufs/inode.c     | 470
-rw-r--r--  arch/powerpc/platforms/cell/spufs/spufs.h     |  71
-rw-r--r--  arch/powerpc/platforms/cell/spufs/syscalls.c  | 106
13 files changed, 2159 insertions, 0 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 4d71aa3ecbb5..39ca7b9da369 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -482,6 +482,7 @@ source arch/powerpc/platforms/embedded6xx/Kconfig
 source arch/powerpc/platforms/4xx/Kconfig
 source arch/powerpc/platforms/85xx/Kconfig
 source arch/powerpc/platforms/8xx/Kconfig
+source arch/powerpc/platforms/cell/Kconfig
 
 menu "Kernel options"
 
diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S
index 4bb3650420b4..989f6286991a 100644
--- a/arch/powerpc/kernel/systbl.S
+++ b/arch/powerpc/kernel/systbl.S
@@ -319,3 +319,5 @@ COMPAT_SYS(ioprio_get)
 SYSCALL(inotify_init)
 SYSCALL(inotify_add_watch)
 SYSCALL(inotify_rm_watch)
+SYSCALL(spu_run)
+SYSCALL(spu_create)
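These two new table entries assign spu_run and spu_create the powerpc syscall numbers 322 and 323. Until a libc wrapper exists, user space can reach them through syscall(2); the following is a minimal sketch, where only the numbers and prototypes come from this patch and the wrapper names are illustrative:

/* sketch only -- wrapper names are made up, not part of this patch */
#include <stdint.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

#define __NR_spu_run	322	/* from systbl.S above */
#define __NR_spu_create	323

static long my_spu_create(const char *name, unsigned int flags, mode_t mode)
{
	return syscall(__NR_spu_create, name, flags, mode);
}

static long my_spu_run(int fd, uint32_t *npc, uint32_t *status)
{
	return syscall(__NR_spu_run, fd, npc, status);
}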
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index a606504678bd..846a1894cf95 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -644,6 +644,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	DBG_LOW(" -> rc=%d\n", rc);
 	return rc;
 }
+EXPORT_SYMBOL_GPL(hash_page);
 
 void hash_preload(struct mm_struct *mm, unsigned long ea,
 		  unsigned long access, unsigned long trap)
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
new file mode 100644
index 000000000000..3157071e241c
--- /dev/null
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -0,0 +1,13 @@
1menu "Cell Broadband Engine options"
2 depends on PPC_CELL
3
4config SPU_FS
5 tristate "SPU file system"
6 default m
7 depends on PPC_CELL
8 help
9 The SPU file system is used to access Synergistic Processing
10 Units on machines implementing the Broadband Processor
11 Architecture.
12
13endmenu
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
index 55e094b96bc0..74616cf13af9 100644
--- a/arch/powerpc/platforms/cell/Makefile
+++ b/arch/powerpc/platforms/cell/Makefile
@@ -1,2 +1,5 @@
 obj-y			+= interrupt.o iommu.o setup.o spider-pic.o
 obj-$(CONFIG_SMP)	+= smp.o
+obj-$(CONFIG_SPU_FS)	+= spufs/ spu_base.o
+builtin-spufs-$(CONFIG_SPU_FS)	+= spu_syscalls.o
+obj-y			+= $(builtin-spufs-m)
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
new file mode 100644
index 000000000000..9e9096590a07
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -0,0 +1,740 @@
1/*
2 * Low-level SPU handling
3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5 *
6 * Author: Arnd Bergmann <arndb@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#define DEBUG 1
24
25#include <linux/interrupt.h>
26#include <linux/list.h>
27#include <linux/module.h>
28#include <linux/poll.h>
29#include <linux/ptrace.h>
30#include <linux/slab.h>
31#include <linux/wait.h>
32
33#include <asm/io.h>
34#include <asm/prom.h>
35#include <asm/semaphore.h>
36#include <asm/spu.h>
37#include <asm/mmu_context.h>
38
39#include "interrupt.h"
40
41static int __spu_trap_invalid_dma(struct spu *spu)
42{
43 pr_debug("%s\n", __FUNCTION__);
44 force_sig(SIGBUS, /* info, */ current);
45 return 0;
46}
47
48static int __spu_trap_dma_align(struct spu *spu)
49{
50 pr_debug("%s\n", __FUNCTION__);
51 force_sig(SIGBUS, /* info, */ current);
52 return 0;
53}
54
55static int __spu_trap_error(struct spu *spu)
56{
57 pr_debug("%s\n", __FUNCTION__);
58 force_sig(SIGILL, /* info, */ current);
59 return 0;
60}
61
62static void spu_restart_dma(struct spu *spu)
63{
64 struct spu_priv2 __iomem *priv2 = spu->priv2;
65 out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
66}
67
68static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
69{
70 struct spu_priv2 __iomem *priv2;
71 struct mm_struct *mm;
72
73 pr_debug("%s\n", __FUNCTION__);
74
75 if (REGION_ID(ea) != USER_REGION_ID) {
76 pr_debug("invalid region access at %016lx\n", ea);
77 return 1;
78 }
79
80 priv2 = spu->priv2;
81 mm = spu->mm;
82
83 if (spu->slb_replace >= 8)
84 spu->slb_replace = 0;
85
86 out_be64(&priv2->slb_index_W, spu->slb_replace);
87 out_be64(&priv2->slb_vsid_RW,
88 (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT)
89 | SLB_VSID_USER);
90 out_be64(&priv2->slb_esid_RW, (ea & ESID_MASK) | SLB_ESID_V);
91
92 spu_restart_dma(spu);
93
94 pr_debug("set slb %d context %lx, ea %016lx, vsid %016lx, esid %016lx\n",
95 spu->slb_replace, mm->context.id, ea,
96 (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT)| SLB_VSID_USER,
97 (ea & ESID_MASK) | SLB_ESID_V);
98 return 0;
99}
100
101static int __spu_trap_data_map(struct spu *spu, unsigned long ea)
102{
103 unsigned long dsisr;
104 struct spu_priv1 __iomem *priv1;
105
106 pr_debug("%s\n", __FUNCTION__);
107 priv1 = spu->priv1;
108 dsisr = in_be64(&priv1->mfc_dsisr_RW);
109
110 wake_up(&spu->stop_wq);
111
112 return 0;
113}
114
115static int __spu_trap_mailbox(struct spu *spu)
116{
117 wake_up_all(&spu->ibox_wq);
118 kill_fasync(&spu->ibox_fasync, SIGIO, POLLIN);
119
120 /* atomically disable SPU mailbox interrupts */
121 spin_lock(&spu->register_lock);
122 out_be64(&spu->priv1->int_mask_class2_RW,
123 in_be64(&spu->priv1->int_mask_class2_RW) & ~0x1);
124 spin_unlock(&spu->register_lock);
125 return 0;
126}
127
128static int __spu_trap_stop(struct spu *spu)
129{
130 pr_debug("%s\n", __FUNCTION__);
131 spu->stop_code = in_be32(&spu->problem->spu_status_R);
132 wake_up(&spu->stop_wq);
133 return 0;
134}
135
136static int __spu_trap_halt(struct spu *spu)
137{
138 pr_debug("%s\n", __FUNCTION__);
139 spu->stop_code = in_be32(&spu->problem->spu_status_R);
140 wake_up(&spu->stop_wq);
141 return 0;
142}
143
144static int __spu_trap_tag_group(struct spu *spu)
145{
146 pr_debug("%s\n", __FUNCTION__);
147 /* wake_up(&spu->dma_wq); */
148 return 0;
149}
150
151static int __spu_trap_spubox(struct spu *spu)
152{
153 wake_up_all(&spu->wbox_wq);
154 kill_fasync(&spu->wbox_fasync, SIGIO, POLLOUT);
155
156 /* atomically disable SPU mailbox interrupts */
157 spin_lock(&spu->register_lock);
158 out_be64(&spu->priv1->int_mask_class2_RW,
159 in_be64(&spu->priv1->int_mask_class2_RW) & ~0x10);
160 spin_unlock(&spu->register_lock);
161 return 0;
162}
163
164static irqreturn_t
165spu_irq_class_0(int irq, void *data, struct pt_regs *regs)
166{
167 struct spu *spu;
168
169 spu = data;
170 spu->class_0_pending = 1;
171 wake_up(&spu->stop_wq);
172
173 return IRQ_HANDLED;
174}
175
176static int
177spu_irq_class_0_bottom(struct spu *spu)
178{
179 unsigned long stat;
180
181 spu->class_0_pending = 0;
182
183 stat = in_be64(&spu->priv1->int_stat_class0_RW);
184
185 if (stat & 1) /* invalid MFC DMA */
186 __spu_trap_invalid_dma(spu);
187
188 if (stat & 2) /* invalid DMA alignment */
189 __spu_trap_dma_align(spu);
190
191 if (stat & 4) /* error on SPU */
192 __spu_trap_error(spu);
193
194 out_be64(&spu->priv1->int_stat_class0_RW, stat);
195 return 0;
196}
197
198static irqreturn_t
199spu_irq_class_1(int irq, void *data, struct pt_regs *regs)
200{
201 struct spu *spu;
202 unsigned long stat, dar;
203
204 spu = data;
205 stat = in_be64(&spu->priv1->int_stat_class1_RW);
206 dar = in_be64(&spu->priv1->mfc_dar_RW);
207
208 if (stat & 1) /* segment fault */
209 __spu_trap_data_seg(spu, dar);
210
211 if (stat & 2) { /* mapping fault */
212 __spu_trap_data_map(spu, dar);
213 }
214
215 if (stat & 4) /* ls compare & suspend on get */
216 ;
217
218 if (stat & 8) /* ls compare & suspend on put */
219 ;
220
221 out_be64(&spu->priv1->int_stat_class1_RW, stat);
222 return stat ? IRQ_HANDLED : IRQ_NONE;
223}
224
225static irqreturn_t
226spu_irq_class_2(int irq, void *data, struct pt_regs *regs)
227{
228 struct spu *spu;
229 unsigned long stat;
230
231 spu = data;
232 stat = in_be64(&spu->priv1->int_stat_class2_RW);
233
234 pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat,
235 in_be64(&spu->priv1->int_mask_class2_RW));
236
237
238 if (stat & 1) /* PPC core mailbox */
239 __spu_trap_mailbox(spu);
240
241 if (stat & 2) /* SPU stop-and-signal */
242 __spu_trap_stop(spu);
243
244 if (stat & 4) /* SPU halted */
245 __spu_trap_halt(spu);
246
247 if (stat & 8) /* DMA tag group complete */
248 __spu_trap_tag_group(spu);
249
250 if (stat & 0x10) /* SPU mailbox threshold */
251 __spu_trap_spubox(spu);
252
253 out_be64(&spu->priv1->int_stat_class2_RW, stat);
254 return stat ? IRQ_HANDLED : IRQ_NONE;
255}
256
257static int
258spu_request_irqs(struct spu *spu)
259{
260 int ret;
261 int irq_base;
262
263 irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;
264
265 snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0", spu->number);
266 ret = request_irq(irq_base + spu->isrc,
267 spu_irq_class_0, 0, spu->irq_c0, spu);
268 if (ret)
269 goto out;
270 out_be64(&spu->priv1->int_mask_class0_RW, 0x7);
271
272 snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", spu->number);
273 ret = request_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc,
274 spu_irq_class_1, 0, spu->irq_c1, spu);
275 if (ret)
276 goto out1;
277 out_be64(&spu->priv1->int_mask_class1_RW, 0x3);
278
279 snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", spu->number);
280 ret = request_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc,
281 spu_irq_class_2, 0, spu->irq_c2, spu);
282 if (ret)
283 goto out2;
284 out_be64(&spu->priv1->int_mask_class2_RW, 0xe);
285 goto out;
286
287out2:
288 free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
289out1:
290 free_irq(irq_base + spu->isrc, spu);
291out:
292 return ret;
293}
294
295static void
296spu_free_irqs(struct spu *spu)
297{
298 int irq_base;
299
300 irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;
301
302 free_irq(irq_base + spu->isrc, spu);
303 free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
304 free_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc, spu);
305}
306
307static LIST_HEAD(spu_list);
308static DECLARE_MUTEX(spu_mutex);
309
310static void spu_init_channels(struct spu *spu)
311{
312 static const struct {
313 unsigned channel;
314 unsigned count;
315 } zero_list[] = {
316 { 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
317 { 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
318 }, count_list[] = {
319 { 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
320 { 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
321 { 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
322 };
323 struct spu_priv2 *priv2;
324 int i;
325
326 priv2 = spu->priv2;
327
328 /* initialize all channel data to zero */
329 for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
330 int count;
331
332 out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
333 for (count = 0; count < zero_list[i].count; count++)
334 out_be64(&priv2->spu_chnldata_RW, 0);
335 }
336
337 /* initialize channel counts to meaningful values */
338 for (i = 0; i < ARRAY_SIZE(count_list); i++) {
339 out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
340 out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
341 }
342}
343
344static void spu_init_regs(struct spu *spu)
345{
346 out_be64(&spu->priv1->int_mask_class0_RW, 0x7);
347 out_be64(&spu->priv1->int_mask_class1_RW, 0x3);
348 out_be64(&spu->priv1->int_mask_class2_RW, 0xe);
349}
350
351struct spu *spu_alloc(void)
352{
353 struct spu *spu;
354
355 down(&spu_mutex);
356 if (!list_empty(&spu_list)) {
357 spu = list_entry(spu_list.next, struct spu, list);
358 list_del_init(&spu->list);
359 pr_debug("Got SPU %x %d\n", spu->isrc, spu->number);
360 } else {
361 pr_debug("No SPU left\n");
362 spu = NULL;
363 }
364 up(&spu_mutex);
365
366 if (spu) {
367 spu_init_channels(spu);
368 spu_init_regs(spu);
369 }
370
371 return spu;
372}
373EXPORT_SYMBOL(spu_alloc);
374
375void spu_free(struct spu *spu)
376{
377 down(&spu_mutex);
378 spu->ibox_fasync = NULL;
379 spu->wbox_fasync = NULL;
380 list_add_tail(&spu->list, &spu_list);
381 up(&spu_mutex);
382}
383EXPORT_SYMBOL(spu_free);
384
385extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
386static int spu_handle_mm_fault(struct spu *spu)
387{
388 struct spu_priv1 __iomem *priv1;
389 struct mm_struct *mm = spu->mm;
390 struct vm_area_struct *vma;
391 u64 ea, dsisr, is_write;
392 int ret;
393
394 priv1 = spu->priv1;
395 ea = in_be64(&priv1->mfc_dar_RW);
396 dsisr = in_be64(&priv1->mfc_dsisr_RW);
397#if 0
398 if (!IS_VALID_EA(ea)) {
399 return -EFAULT;
400 }
401#endif /* XXX */
402 if (mm == NULL) {
403 return -EFAULT;
404 }
405 if (mm->pgd == NULL) {
406 return -EFAULT;
407 }
408
409 down_read(&mm->mmap_sem);
410 vma = find_vma(mm, ea);
411 if (!vma)
412 goto bad_area;
413 if (vma->vm_start <= ea)
414 goto good_area;
415 if (!(vma->vm_flags & VM_GROWSDOWN))
416 goto bad_area;
417#if 0
418 if (expand_stack(vma, ea))
419 goto bad_area;
420#endif /* XXX */
421good_area:
422 is_write = dsisr & MFC_DSISR_ACCESS_PUT;
423 if (is_write) {
424 if (!(vma->vm_flags & VM_WRITE))
425 goto bad_area;
426 } else {
427 if (dsisr & MFC_DSISR_ACCESS_DENIED)
428 goto bad_area;
429 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
430 goto bad_area;
431 }
432 ret = 0;
433 switch (handle_mm_fault(mm, vma, ea, is_write)) {
434 case VM_FAULT_MINOR:
435 current->min_flt++;
436 break;
437 case VM_FAULT_MAJOR:
438 current->maj_flt++;
439 break;
440 case VM_FAULT_SIGBUS:
441 ret = -EFAULT;
442 goto bad_area;
443 case VM_FAULT_OOM:
444 ret = -ENOMEM;
445 goto bad_area;
446 default:
447 BUG();
448 }
449 up_read(&mm->mmap_sem);
450 return ret;
451
452bad_area:
453 up_read(&mm->mmap_sem);
454 return -EFAULT;
455}
456
457static int spu_handle_pte_fault(struct spu *spu)
458{
459 struct spu_priv1 __iomem *priv1;
460 u64 ea, dsisr, access, error = 0UL;
461 int ret = 0;
462
463 priv1 = spu->priv1;
464 ea = in_be64(&priv1->mfc_dar_RW);
465 dsisr = in_be64(&priv1->mfc_dsisr_RW);
466 access = (_PAGE_PRESENT | _PAGE_USER);
467 if (dsisr & MFC_DSISR_PTE_NOT_FOUND) {
468 if (hash_page(ea, access, 0x300) != 0)
469 error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
470 }
471 if ((error & CLASS1_ENABLE_STORAGE_FAULT_INTR) ||
472 (dsisr & MFC_DSISR_ACCESS_DENIED)) {
473 if ((ret = spu_handle_mm_fault(spu)) != 0)
474 error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
475 else
476 error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR;
477 }
478 if (!error)
479 spu_restart_dma(spu);
480
481 return ret;
482}
483
484int spu_run(struct spu *spu)
485{
486 struct spu_problem __iomem *prob;
487 struct spu_priv1 __iomem *priv1;
488 struct spu_priv2 __iomem *priv2;
489 unsigned long status;
490 int ret;
491
492 prob = spu->problem;
493 priv1 = spu->priv1;
494 priv2 = spu->priv2;
495
496 /* Let SPU run. */
497 spu->mm = current->mm;
498 eieio();
499 out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
500
501 do {
502 ret = wait_event_interruptible(spu->stop_wq,
503 (!((status = in_be32(&prob->spu_status_R)) & 0x1))
504 || (in_be64(&priv1->mfc_dsisr_RW) & MFC_DSISR_PTE_NOT_FOUND)
505 || spu->class_0_pending);
506
507 if (status & SPU_STATUS_STOPPED_BY_STOP)
508 ret = -EAGAIN;
509 else if (status & SPU_STATUS_STOPPED_BY_HALT)
510 ret = -EIO;
511 else if (in_be64(&priv1->mfc_dsisr_RW) & MFC_DSISR_PTE_NOT_FOUND)
512 ret = spu_handle_pte_fault(spu);
513
514 if (spu->class_0_pending)
515 spu_irq_class_0_bottom(spu);
516
517 if (!ret && signal_pending(current))
518 ret = -ERESTARTSYS;
519
520 } while (!ret);
521
522 /* Ensure SPU is stopped. */
523 out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
524 eieio();
525 while (in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)
526 cpu_relax();
527
528 out_be64(&priv2->slb_invalidate_all_W, 0);
529 out_be64(&priv1->tlb_invalidate_entry_W, 0UL);
530 eieio();
531
532 spu->mm = NULL;
533
534 /* Check for SPU breakpoint. */
535 if (unlikely(current->ptrace & PT_PTRACED)) {
536 status = in_be32(&prob->spu_status_R);
537
538 if ((status & SPU_STATUS_STOPPED_BY_STOP)
539 && status >> SPU_STOP_STATUS_SHIFT == 0x3fff) {
540 force_sig(SIGTRAP, current);
541 ret = -ERESTARTSYS;
542 }
543 }
544
545 return ret;
546}
547EXPORT_SYMBOL(spu_run);
548
549static void __iomem * __init map_spe_prop(struct device_node *n,
550 const char *name)
551{
552 struct address_prop {
553 unsigned long address;
554 unsigned int len;
555 } __attribute__((packed)) *prop;
556
557 void *p;
558 int proplen;
559
560 p = get_property(n, name, &proplen);
561 if (proplen != sizeof (struct address_prop))
562 return NULL;
563
564 prop = p;
565
566 return ioremap(prop->address, prop->len);
567}
568
569static void spu_unmap(struct spu *spu)
570{
571 iounmap(spu->priv2);
572 iounmap(spu->priv1);
573 iounmap(spu->problem);
574 iounmap((u8 __iomem *)spu->local_store);
575}
576
577static int __init spu_map_device(struct spu *spu, struct device_node *spe)
578{
579 char *prop;
580 int ret;
581
582 ret = -ENODEV;
583 prop = get_property(spe, "isrc", NULL);
584 if (!prop)
585 goto out;
586 spu->isrc = *(unsigned int *)prop;
587
588 spu->name = get_property(spe, "name", NULL);
589 if (!spu->name)
590 goto out;
591
592 prop = get_property(spe, "local-store", NULL);
593 if (!prop)
594 goto out;
595 spu->local_store_phys = *(unsigned long *)prop;
596
597 /* we use local store as ram, not io memory */
598 spu->local_store = (void __force *)map_spe_prop(spe, "local-store");
599 if (!spu->local_store)
600 goto out;
601
602 spu->problem= map_spe_prop(spe, "problem");
603 if (!spu->problem)
604 goto out_unmap;
605
606 spu->priv1= map_spe_prop(spe, "priv1");
607 if (!spu->priv1)
608 goto out_unmap;
609
610 spu->priv2= map_spe_prop(spe, "priv2");
611 if (!spu->priv2)
612 goto out_unmap;
613 ret = 0;
614 goto out;
615
616out_unmap:
617 spu_unmap(spu);
618out:
619 return ret;
620}
621
622static int __init find_spu_node_id(struct device_node *spe)
623{
624 unsigned int *id;
625 struct device_node *cpu;
626
627 cpu = spe->parent->parent;
628 id = (unsigned int *)get_property(cpu, "node-id", NULL);
629
630 return id ? *id : 0;
631}
632
633static int __init create_spu(struct device_node *spe)
634{
635 struct spu *spu;
636 int ret;
637 static int number;
638
639 ret = -ENOMEM;
640 spu = kmalloc(sizeof (*spu), GFP_KERNEL);
641 if (!spu)
642 goto out;
643
644 ret = spu_map_device(spu, spe);
645 if (ret)
646 goto out_free;
647
648 spu->node = find_spu_node_id(spe);
649 spu->stop_code = 0;
650 spu->slb_replace = 0;
651 spu->mm = NULL;
652 spu->class_0_pending = 0;
653 spin_lock_init(&spu->register_lock);
654
655 out_be64(&spu->priv1->mfc_sdr_RW, mfspr(SPRN_SDR1));
656 out_be64(&spu->priv1->mfc_sr1_RW, 0x33);
657
658 init_waitqueue_head(&spu->stop_wq);
659 init_waitqueue_head(&spu->wbox_wq);
660 init_waitqueue_head(&spu->ibox_wq);
661
662 spu->ibox_fasync = NULL;
663 spu->wbox_fasync = NULL;
664
665 down(&spu_mutex);
666 spu->number = number++;
667 ret = spu_request_irqs(spu);
668 if (ret)
669 goto out_unmap;
670
671 list_add(&spu->list, &spu_list);
672 up(&spu_mutex);
673
674 pr_debug(KERN_DEBUG "Using SPE %s %02x %p %p %p %p %d\n",
675 spu->name, spu->isrc, spu->local_store,
676 spu->problem, spu->priv1, spu->priv2, spu->number);
677 goto out;
678
679out_unmap:
680 up(&spu_mutex);
681 spu_unmap(spu);
682out_free:
683 kfree(spu);
684out:
685 return ret;
686}
687
688static void destroy_spu(struct spu *spu)
689{
690 list_del_init(&spu->list);
691
692 spu_free_irqs(spu);
693 spu_unmap(spu);
694 kfree(spu);
695}
696
697static void cleanup_spu_base(void)
698{
699 struct spu *spu, *tmp;
700 down(&spu_mutex);
701 list_for_each_entry_safe(spu, tmp, &spu_list, list)
702 destroy_spu(spu);
703 up(&spu_mutex);
704}
705module_exit(cleanup_spu_base);
706
707static int __init init_spu_base(void)
708{
709 struct device_node *node;
710 int ret;
711
712 ret = -ENODEV;
713 for (node = of_find_node_by_type(NULL, "spe");
714 node; node = of_find_node_by_type(node, "spe")) {
715 ret = create_spu(node);
716 if (ret) {
717 printk(KERN_WARNING "%s: Error initializing %s\n",
718 __FUNCTION__, node->name);
719 cleanup_spu_base();
720 break;
721 }
722 }
723 /* in some old firmware versions, the spe is called 'spc', so we
724 look for that as well */
725 for (node = of_find_node_by_type(NULL, "spc");
726 node; node = of_find_node_by_type(node, "spc")) {
727 ret = create_spu(node);
728 if (ret) {
729 printk(KERN_WARNING "%s: Error initializing %s\n",
730 __FUNCTION__, node->name);
731 cleanup_spu_base();
732 break;
733 }
734 }
735 return ret;
736}
737module_init(init_spu_base);
738
739MODULE_LICENSE("GPL");
740MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
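spu_base.c exports spu_alloc(), spu_run() and spu_free() for use by the spufs module added below. As a minimal sketch of a kernel-side caller, assuming process context with a valid current->mm (the helper name is made up for illustration):

/* illustrative use of the exported spu_base interface; not part of this
 * patch, error handling kept minimal */
#include <asm/spu.h>

static int run_on_idle_spu(void)
{
	struct spu *spu;
	int ret;

	spu = spu_alloc();		/* take an SPU off the free list, or NULL */
	if (!spu)
		return -EBUSY;

	/* local store and NPC would be set up here (spufs does this
	 * through the problem-state mapping) */

	ret = spu_run(spu);		/* blocks until stop, halt or a signal */

	spu_free(spu);			/* return the SPU to the free list */
	return ret;
}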
diff --git a/arch/powerpc/platforms/cell/spu_syscalls.c b/arch/powerpc/platforms/cell/spu_syscalls.c
new file mode 100644
index 000000000000..43e0b187ffde
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spu_syscalls.c
@@ -0,0 +1,86 @@
1/*
2 * SPU file system -- system call stubs
3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5 *
6 * Author: Arnd Bergmann <arndb@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22#include <linux/file.h>
23#include <linux/module.h>
24#include <linux/syscalls.h>
25
26#include <asm/spu.h>
27
28struct spufs_calls spufs_calls = {
29 .owner = NULL,
30};
31
32/* These stub syscalls are needed to have the actual implementation
33 * within a loadable module. When spufs is built into the kernel,
34 * this file is not used and the syscalls directly enter the fs code */
35
36asmlinkage long sys_spu_create(const char __user *name,
37 unsigned int flags, mode_t mode)
38{
39 long ret;
40
41 ret = -ENOSYS;
42 if (try_module_get(spufs_calls.owner)) {
43 ret = spufs_calls.create_thread(name, flags, mode);
44 module_put(spufs_calls.owner);
45 }
46 return ret;
47}
48
49asmlinkage long sys_spu_run(int fd, __u32 __user *unpc, __u32 __user *ustatus)
50{
51 long ret;
52 struct file *filp;
53 int fput_needed;
54
55 ret = -ENOSYS;
56 if (try_module_get(spufs_calls.owner)) {
57 ret = -EBADF;
58 filp = fget_light(fd, &fput_needed);
59 if (filp) {
60 ret = spufs_calls.spu_run(filp, unpc, ustatus);
61 fput_light(filp, fput_needed);
62 }
63 module_put(spufs_calls.owner);
64 }
65 return ret;
66}
67
68int register_spu_syscalls(struct spufs_calls *calls)
69{
70 if (spufs_calls.owner)
71 return -EBUSY;
72
73 spufs_calls.create_thread = calls->create_thread;
74 spufs_calls.spu_run = calls->spu_run;
75 smp_mb();
76 spufs_calls.owner = calls->owner;
77 return 0;
78}
79EXPORT_SYMBOL_GPL(register_spu_syscalls);
80
81void unregister_spu_syscalls(struct spufs_calls *calls)
82{
83 BUG_ON(spufs_calls.owner != calls->owner);
84 spufs_calls.owner = NULL;
85}
86EXPORT_SYMBOL_GPL(unregister_spu_syscalls);
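The stubs return -ENOSYS until a provider fills in spufs_calls; with spufs built as a module, the registration happens in spufs/syscalls.c and spufs/inode.c further down. Condensed into one sketch, the module side of that contract looks like:

/* condensed illustration of the register/unregister contract;
 * the real spufs_calls instance is defined in spufs/syscalls.c below */
static struct spufs_calls my_spufs_calls = {
	.create_thread	= sys_spu_create,	/* module-local implementation */
	.spu_run	= do_spu_run,
	.owner		= THIS_MODULE,
};

static int __init my_spufs_init(void)
{
	return register_spu_syscalls(&my_spufs_calls);	/* -EBUSY if already taken */
}

static void __exit my_spufs_exit(void)
{
	unregister_spu_syscalls(&my_spufs_calls);
}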
diff --git a/arch/powerpc/platforms/cell/spufs/Makefile b/arch/powerpc/platforms/cell/spufs/Makefile
new file mode 100644
index 000000000000..6f496e37bcb7
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/Makefile
@@ -0,0 +1,3 @@
1obj-$(CONFIG_SPU_FS) += spufs.o
2
3spufs-y += inode.o file.o context.o syscalls.o
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
new file mode 100644
index 000000000000..a69b85e2778a
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -0,0 +1,67 @@
1/*
2 * SPU file system -- SPU context management
3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5 *
6 * Author: Arnd Bergmann <arndb@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <linux/slab.h>
24#include <asm/spu.h>
25#include "spufs.h"
26
27struct spu_context *alloc_spu_context(void)
28{
29 struct spu_context *ctx;
30 ctx = kmalloc(sizeof *ctx, GFP_KERNEL);
31 if (!ctx)
32 goto out;
33 ctx->spu = spu_alloc();
34 if (!ctx->spu)
35 goto out_free;
36 init_rwsem(&ctx->backing_sema);
37 spin_lock_init(&ctx->mmio_lock);
38 kref_init(&ctx->kref);
39 goto out;
40out_free:
41 kfree(ctx);
42 ctx = NULL;
43out:
44 return ctx;
45}
46
47void destroy_spu_context(struct kref *kref)
48{
49 struct spu_context *ctx;
50 ctx = container_of(kref, struct spu_context, kref);
51 if (ctx->spu)
52 spu_free(ctx->spu);
53 kfree(ctx);
54}
55
56struct spu_context * get_spu_context(struct spu_context *ctx)
57{
58 kref_get(&ctx->kref);
59 return ctx;
60}
61
62int put_spu_context(struct spu_context *ctx)
63{
64 return kref_put(&ctx->kref, &destroy_spu_context);
65}
66
67
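The context is reference counted through the embedded kref: alloc_spu_context() starts the count at one, get/put adjust it, and destroy_spu_context() only runs when the final put drops it to zero. A minimal sketch of that lifecycle, for illustration only:

/* illustration of the refcounting contract, not part of this patch */
static void spu_context_lifecycle(void)
{
	struct spu_context *ctx = alloc_spu_context();	/* kref starts at 1 */

	if (!ctx)
		return;
	get_spu_context(ctx);		/* e.g. one extra reference per open file */
	/* ... use ctx->spu under ctx->backing_sema ... */
	put_spu_context(ctx);		/* drop the file's reference */
	put_spu_context(ctx);		/* final put invokes destroy_spu_context() */
}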
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
new file mode 100644
index 000000000000..c1e643310494
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -0,0 +1,596 @@
1/*
2 * SPU file system -- file contents
3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5 *
6 * Author: Arnd Bergmann <arndb@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <linux/fs.h>
24#include <linux/ioctl.h>
25#include <linux/module.h>
26#include <linux/poll.h>
27
28#include <asm/io.h>
29#include <asm/semaphore.h>
30#include <asm/spu.h>
31#include <asm/uaccess.h>
32
33#include "spufs.h"
34
35static int
36spufs_mem_open(struct inode *inode, struct file *file)
37{
38 struct spufs_inode_info *i = SPUFS_I(inode);
39 file->private_data = i->i_ctx;
40 return 0;
41}
42
43static ssize_t
44spufs_mem_read(struct file *file, char __user *buffer,
45 size_t size, loff_t *pos)
46{
47 struct spu *spu;
48 struct spu_context *ctx;
49 int ret;
50
51 ctx = file->private_data;
52 spu = ctx->spu;
53
54 down_read(&ctx->backing_sema);
55 if (spu->number & 0/*1*/) {
56 ret = generic_file_read(file, buffer, size, pos);
57 goto out;
58 }
59
60 ret = simple_read_from_buffer(buffer, size, pos,
61 spu->local_store, LS_SIZE);
62out:
63 up_read(&ctx->backing_sema);
64 return ret;
65}
66
67static ssize_t
68spufs_mem_write(struct file *file, const char __user *buffer,
69 size_t size, loff_t *pos)
70{
71 struct spu_context *ctx = file->private_data;
72 struct spu *spu = ctx->spu;
73
74 if (spu->number & 0) //1)
75 return generic_file_write(file, buffer, size, pos);
76
77 size = min_t(ssize_t, LS_SIZE - *pos, size);
78 if (size <= 0)
79 return -EFBIG;
80 *pos += size;
81 return copy_from_user(spu->local_store + *pos - size,
82 buffer, size) ? -EFAULT : size;
83}
84
85static int
86spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
87{
88 struct spu_context *ctx = file->private_data;
89 struct spu *spu = ctx->spu;
90 unsigned long pfn;
91
92 if (spu->number & 0) //1)
93 return generic_file_mmap(file, vma);
94
95 vma->vm_flags |= VM_RESERVED;
96 vma->vm_page_prot = __pgprot(pgprot_val (vma->vm_page_prot)
97 | _PAGE_NO_CACHE);
98 pfn = spu->local_store_phys >> PAGE_SHIFT;
99 /*
100 * This will work for actual SPUs, but not for vmalloc memory:
101 */
102 if (remap_pfn_range(vma, vma->vm_start, pfn,
103 vma->vm_end-vma->vm_start, vma->vm_page_prot))
104 return -EAGAIN;
105 return 0;
106}
107
108static struct file_operations spufs_mem_fops = {
109 .open = spufs_mem_open,
110 .read = spufs_mem_read,
111 .write = spufs_mem_write,
112 .mmap = spufs_mem_mmap,
113 .llseek = generic_file_llseek,
114};
115
116/* generic open function for all pipe-like files */
117static int spufs_pipe_open(struct inode *inode, struct file *file)
118{
119 struct spufs_inode_info *i = SPUFS_I(inode);
120 file->private_data = i->i_ctx;
121
122 return nonseekable_open(inode, file);
123}
124
125static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
126 size_t len, loff_t *pos)
127{
128 struct spu_context *ctx;
129 struct spu_problem __iomem *prob;
130 u32 mbox_stat;
131 u32 mbox_data;
132
133 if (len < 4)
134 return -EINVAL;
135
136 ctx = file->private_data;
137 prob = ctx->spu->problem;
138 mbox_stat = in_be32(&prob->mb_stat_R);
139 if (!(mbox_stat & 0x0000ff))
140 return -EAGAIN;
141
142 mbox_data = in_be32(&prob->pu_mb_R);
143
144 if (copy_to_user(buf, &mbox_data, sizeof mbox_data))
145 return -EFAULT;
146
147 return 4;
148}
149
150static struct file_operations spufs_mbox_fops = {
151 .open = spufs_pipe_open,
152 .read = spufs_mbox_read,
153};
154
155static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
156 size_t len, loff_t *pos)
157{
158 struct spu_context *ctx;
159 u32 mbox_stat;
160
161 if (len < 4)
162 return -EINVAL;
163
164 ctx = file->private_data;
165 mbox_stat = in_be32(&ctx->spu->problem->mb_stat_R) & 0xff;
166
167 if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
168 return -EFAULT;
169
170 return 4;
171}
172
173static struct file_operations spufs_mbox_stat_fops = {
174 .open = spufs_pipe_open,
175 .read = spufs_mbox_stat_read,
176};
177
178/* low-level ibox access function */
179size_t spu_ibox_read(struct spu *spu, u32 *data)
180{
181 int ret;
182
183 spin_lock_irq(&spu->register_lock);
184
185 if (in_be32(&spu->problem->mb_stat_R) & 0xff0000) {
186 /* read the first available word */
187 *data = in_be64(&spu->priv2->puint_mb_R);
188 ret = 4;
189 } else {
190 /* make sure we get woken up by the interrupt */
191 out_be64(&spu->priv1->int_mask_class2_RW,
192 in_be64(&spu->priv1->int_mask_class2_RW) | 0x1);
193 ret = 0;
194 }
195
196 spin_unlock_irq(&spu->register_lock);
197 return ret;
198}
199EXPORT_SYMBOL(spu_ibox_read);
200
201static int spufs_ibox_fasync(int fd, struct file *file, int on)
202{
203 struct spu_context *ctx;
204 ctx = file->private_data;
205 return fasync_helper(fd, file, on, &ctx->spu->ibox_fasync);
206}
207
208static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
209 size_t len, loff_t *pos)
210{
211 struct spu_context *ctx;
212 u32 ibox_data;
213 ssize_t ret;
214
215 if (len < 4)
216 return -EINVAL;
217
218 ctx = file->private_data;
219
220 ret = 0;
221 if (file->f_flags & O_NONBLOCK) {
222 if (!spu_ibox_read(ctx->spu, &ibox_data))
223 ret = -EAGAIN;
224 } else {
225 ret = wait_event_interruptible(ctx->spu->ibox_wq,
226 spu_ibox_read(ctx->spu, &ibox_data));
227 }
228
229 if (ret)
230 return ret;
231
232 ret = 4;
233 if (copy_to_user(buf, &ibox_data, sizeof ibox_data))
234 ret = -EFAULT;
235
236 return ret;
237}
238
239static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
240{
241 struct spu_context *ctx;
242 struct spu_problem __iomem *prob;
243 u32 mbox_stat;
244 unsigned int mask;
245
246 ctx = file->private_data;
247 prob = ctx->spu->problem;
248 mbox_stat = in_be32(&prob->mb_stat_R);
249
250 poll_wait(file, &ctx->spu->ibox_wq, wait);
251
252 mask = 0;
253 if (mbox_stat & 0xff0000)
254 mask |= POLLIN | POLLRDNORM;
255
256 return mask;
257}
258
259static struct file_operations spufs_ibox_fops = {
260 .open = spufs_pipe_open,
261 .read = spufs_ibox_read,
262 .poll = spufs_ibox_poll,
263 .fasync = spufs_ibox_fasync,
264};
265
266static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
267 size_t len, loff_t *pos)
268{
269 struct spu_context *ctx;
270 u32 ibox_stat;
271
272 if (len < 4)
273 return -EINVAL;
274
275 ctx = file->private_data;
276 ibox_stat = (in_be32(&ctx->spu->problem->mb_stat_R) >> 16) & 0xff;
277
278 if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
279 return -EFAULT;
280
281 return 4;
282}
283
284static struct file_operations spufs_ibox_stat_fops = {
285 .open = spufs_pipe_open,
286 .read = spufs_ibox_stat_read,
287};
288
289/* low-level mailbox write */
290size_t spu_wbox_write(struct spu *spu, u32 data)
291{
292 int ret;
293
294 spin_lock_irq(&spu->register_lock);
295
296 if (in_be32(&spu->problem->mb_stat_R) & 0x00ff00) {
297 /* we have space to write wbox_data to */
298 out_be32(&spu->problem->spu_mb_W, data);
299 ret = 4;
300 } else {
301 /* make sure we get woken up by the interrupt when space
302 becomes available */
303 out_be64(&spu->priv1->int_mask_class2_RW,
304 in_be64(&spu->priv1->int_mask_class2_RW) | 0x10);
305 ret = 0;
306 }
307
308 spin_unlock_irq(&spu->register_lock);
309 return ret;
310}
311EXPORT_SYMBOL(spu_wbox_write);
312
313static int spufs_wbox_fasync(int fd, struct file *file, int on)
314{
315 struct spu_context *ctx;
316 ctx = file->private_data;
317 return fasync_helper(fd, file, on, &ctx->spu->wbox_fasync);
318}
319
320static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
321 size_t len, loff_t *pos)
322{
323 struct spu_context *ctx;
324 u32 wbox_data;
325 int ret;
326
327 if (len < 4)
328 return -EINVAL;
329
330 ctx = file->private_data;
331
332 if (copy_from_user(&wbox_data, buf, sizeof wbox_data))
333 return -EFAULT;
334
335 ret = 0;
336 if (file->f_flags & O_NONBLOCK) {
337 if (!spu_wbox_write(ctx->spu, wbox_data))
338 ret = -EAGAIN;
339 } else {
340 ret = wait_event_interruptible(ctx->spu->wbox_wq,
341 spu_wbox_write(ctx->spu, wbox_data));
342 }
343
344 return ret ? ret : sizeof wbox_data;
345}
346
347static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
348{
349 struct spu_context *ctx;
350 struct spu_problem __iomem *prob;
351 u32 mbox_stat;
352 unsigned int mask;
353
354 ctx = file->private_data;
355 prob = ctx->spu->problem;
356 mbox_stat = in_be32(&prob->mb_stat_R);
357
358 poll_wait(file, &ctx->spu->wbox_wq, wait);
359
360 mask = 0;
361 if (mbox_stat & 0x00ff00)
362 mask = POLLOUT | POLLWRNORM;
363
364 return mask;
365}
366
367static struct file_operations spufs_wbox_fops = {
368 .open = spufs_pipe_open,
369 .write = spufs_wbox_write,
370 .poll = spufs_wbox_poll,
371 .fasync = spufs_wbox_fasync,
372};
373
374static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
375 size_t len, loff_t *pos)
376{
377 struct spu_context *ctx;
378 u32 wbox_stat;
379
380 if (len < 4)
381 return -EINVAL;
382
383 ctx = file->private_data;
384 wbox_stat = (in_be32(&ctx->spu->problem->mb_stat_R) >> 8) & 0xff;
385
386 if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
387 return -EFAULT;
388
389 return 4;
390}
391
392static struct file_operations spufs_wbox_stat_fops = {
393 .open = spufs_pipe_open,
394 .read = spufs_wbox_stat_read,
395};
396
397long spufs_run_spu(struct file *file, struct spu_context *ctx,
398 u32 *npc, u32 *status)
399{
400 struct spu_problem __iomem *prob;
401 int ret;
402
403 if (file->f_flags & O_NONBLOCK) {
404 ret = -EAGAIN;
405 if (!down_write_trylock(&ctx->backing_sema))
406 goto out;
407 } else {
408 down_write(&ctx->backing_sema);
409 }
410
411 prob = ctx->spu->problem;
412 out_be32(&prob->spu_npc_RW, *npc);
413
414 ret = spu_run(ctx->spu);
415
416 *status = in_be32(&prob->spu_status_R);
417 *npc = in_be32(&prob->spu_npc_RW);
418
419 up_write(&ctx->backing_sema);
420
421out:
422 return ret;
423}
424
425static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
426 size_t len, loff_t *pos)
427{
428 struct spu_context *ctx;
429 struct spu_problem *prob;
430 u32 data;
431
432 ctx = file->private_data;
433 prob = ctx->spu->problem;
434
435 if (len < 4)
436 return -EINVAL;
437
438 data = in_be32(&prob->signal_notify1);
439 if (copy_to_user(buf, &data, 4))
440 return -EFAULT;
441
442 return 4;
443}
444
445static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
446 size_t len, loff_t *pos)
447{
448 struct spu_context *ctx;
449 struct spu_problem *prob;
450 u32 data;
451
452 ctx = file->private_data;
453 prob = ctx->spu->problem;
454
455 if (len < 4)
456 return -EINVAL;
457
458 if (copy_from_user(&data, buf, 4))
459 return -EFAULT;
460
461 out_be32(&prob->signal_notify1, data);
462
463 return 4;
464}
465
466static struct file_operations spufs_signal1_fops = {
467 .open = spufs_pipe_open,
468 .read = spufs_signal1_read,
469 .write = spufs_signal1_write,
470};
471
472static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
473 size_t len, loff_t *pos)
474{
475 struct spu_context *ctx;
476 struct spu_problem *prob;
477 u32 data;
478
479 ctx = file->private_data;
480 prob = ctx->spu->problem;
481
482 if (len < 4)
483 return -EINVAL;
484
485 data = in_be32(&prob->signal_notify2);
486 if (copy_to_user(buf, &data, 4))
487 return -EFAULT;
488
489 return 4;
490}
491
492static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
493 size_t len, loff_t *pos)
494{
495 struct spu_context *ctx;
496 struct spu_problem *prob;
497 u32 data;
498
499 ctx = file->private_data;
500 prob = ctx->spu->problem;
501
502 if (len < 4)
503 return -EINVAL;
504
505 if (copy_from_user(&data, buf, 4))
506 return -EFAULT;
507
508 out_be32(&prob->signal_notify2, data);
509
510 return 4;
511}
512
513static struct file_operations spufs_signal2_fops = {
514 .open = spufs_pipe_open,
515 .read = spufs_signal2_read,
516 .write = spufs_signal2_write,
517};
518
519static void spufs_signal1_type_set(void *data, u64 val)
520{
521 struct spu_context *ctx = data;
522 struct spu_priv2 *priv2 = ctx->spu->priv2;
523 u64 tmp;
524
525 spin_lock_irq(&ctx->spu->register_lock);
526 tmp = in_be64(&priv2->spu_cfg_RW);
527 if (val)
528 tmp |= 1;
529 else
530 tmp &= ~1;
531 out_be64(&priv2->spu_cfg_RW, tmp);
532 spin_unlock_irq(&ctx->spu->register_lock);
533}
534
535static u64 spufs_signal1_type_get(void *data)
536{
537 struct spu_context *ctx = data;
538 return (in_be64(&ctx->spu->priv2->spu_cfg_RW) & 1) != 0;
539}
540DEFINE_SIMPLE_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
541 spufs_signal1_type_set, "%llu");
542
543static void spufs_signal2_type_set(void *data, u64 val)
544{
545 struct spu_context *ctx = data;
546 struct spu_priv2 *priv2 = ctx->spu->priv2;
547 u64 tmp;
548
549 spin_lock_irq(&ctx->spu->register_lock);
550 tmp = in_be64(&priv2->spu_cfg_RW);
551 if (val)
552 tmp |= 2;
553 else
554 tmp &= ~2;
555 out_be64(&priv2->spu_cfg_RW, tmp);
556 spin_unlock_irq(&ctx->spu->register_lock);
557}
558
559static u64 spufs_signal2_type_get(void *data)
560{
561 struct spu_context *ctx = data;
562 return (in_be64(&ctx->spu->priv2->spu_cfg_RW) & 2) != 0;
563}
564DEFINE_SIMPLE_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
565 spufs_signal2_type_set, "%llu");
566
567static void spufs_npc_set(void *data, u64 val)
568{
569 struct spu_context *ctx = data;
570 out_be32(&ctx->spu->problem->spu_npc_RW, val);
571}
572
573static u64 spufs_npc_get(void *data)
574{
575 struct spu_context *ctx = data;
576 u64 ret;
577 ret = in_be32(&ctx->spu->problem->spu_npc_RW);
578 return ret;
579}
580DEFINE_SIMPLE_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set, "%llx\n")
581
582struct tree_descr spufs_dir_contents[] = {
583 { "mem", &spufs_mem_fops, 0666, },
584 { "mbox", &spufs_mbox_fops, 0444, },
585 { "ibox", &spufs_ibox_fops, 0444, },
586 { "wbox", &spufs_wbox_fops, 0222, },
587 { "mbox_stat", &spufs_mbox_stat_fops, 0444, },
588 { "ibox_stat", &spufs_ibox_stat_fops, 0444, },
589 { "wbox_stat", &spufs_wbox_stat_fops, 0444, },
590 { "signal1", &spufs_signal1_fops, 0666, },
591 { "signal2", &spufs_signal2_fops, 0666, },
592 { "signal1_type", &spufs_signal1_type, 0666, },
593 { "signal2_type", &spufs_signal2_type, 0666, },
594 { "npc", &spufs_npc_ops, 0666, },
595 {},
596};
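spufs_dir_contents defines the per-context directory: mem, the three mailbox files and their *_stat counterparts, the two signal-notification registers with their type switches, and npc. From user space these behave like ordinary files; a sketch of reading one inbound-mailbox word, assuming the filesystem is mounted at /spu (the mount point is an assumption of the example, not fixed by this patch):

/* user-space sketch: read one word from a context's ibox file */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static int read_ibox_word(const char *ctxdir)
{
	char path[256];
	uint32_t data;
	int fd, ret = -1;

	snprintf(path, sizeof(path), "%s/ibox", ctxdir);
	fd = open(path, O_RDONLY);	/* without O_NONBLOCK, read() sleeps on ibox_wq */
	if (fd < 0)
		return -1;
	if (read(fd, &data, sizeof(data)) == sizeof(data)) {
		printf("ibox: 0x%08x\n", data);
		ret = 0;
	}
	close(fd);
	return ret;
}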
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
new file mode 100644
index 000000000000..f7aa0a6b1ce5
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -0,0 +1,470 @@
1/*
2 * SPU file system
3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5 *
6 * Author: Arnd Bergmann <arndb@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <linux/file.h>
24#include <linux/fs.h>
25#include <linux/backing-dev.h>
26#include <linux/init.h>
27#include <linux/ioctl.h>
28#include <linux/module.h>
29#include <linux/namei.h>
30#include <linux/pagemap.h>
31#include <linux/poll.h>
32#include <linux/slab.h>
33#include <linux/parser.h>
34
35#include <asm/io.h>
36#include <asm/semaphore.h>
37#include <asm/spu.h>
38#include <asm/uaccess.h>
39
40#include "spufs.h"
41
42static kmem_cache_t *spufs_inode_cache;
43
44/* Information about the backing dev, same as ramfs */
45#if 0
46static struct backing_dev_info spufs_backing_dev_info = {
47 .ra_pages = 0, /* No readahead */
48 .capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK |
49 BDI_CAP_MAP_DIRECT | BDI_CAP_MAP_COPY | BDI_CAP_READ_MAP |
50 BDI_CAP_WRITE_MAP,
51};
52
53static struct address_space_operations spufs_aops = {
54 .readpage = simple_readpage,
55 .prepare_write = simple_prepare_write,
56 .commit_write = simple_commit_write,
57};
58#endif
59
60/* Inode operations */
61
62static struct inode *
63spufs_alloc_inode(struct super_block *sb)
64{
65 struct spufs_inode_info *ei;
66
67 ei = kmem_cache_alloc(spufs_inode_cache, SLAB_KERNEL);
68 if (!ei)
69 return NULL;
70 return &ei->vfs_inode;
71}
72
73static void
74spufs_destroy_inode(struct inode *inode)
75{
76 kmem_cache_free(spufs_inode_cache, SPUFS_I(inode));
77}
78
79static void
80spufs_init_once(void *p, kmem_cache_t * cachep, unsigned long flags)
81{
82 struct spufs_inode_info *ei = p;
83
84 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
85 SLAB_CTOR_CONSTRUCTOR) {
86 inode_init_once(&ei->vfs_inode);
87 }
88}
89
90static struct inode *
91spufs_new_inode(struct super_block *sb, int mode)
92{
93 struct inode *inode;
94
95 inode = new_inode(sb);
96 if (!inode)
97 goto out;
98
99 inode->i_mode = mode;
100 inode->i_uid = current->fsuid;
101 inode->i_gid = current->fsgid;
102 inode->i_blksize = PAGE_CACHE_SIZE;
103 inode->i_blocks = 0;
104 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
105out:
106 return inode;
107}
108
109static int
110spufs_setattr(struct dentry *dentry, struct iattr *attr)
111{
112 struct inode *inode = dentry->d_inode;
113
114/* dump_stack();
115 pr_debug("ia_size %lld, i_size:%lld\n", attr->ia_size, inode->i_size);
116*/
117 if ((attr->ia_valid & ATTR_SIZE) &&
118 (attr->ia_size != inode->i_size))
119 return -EINVAL;
120 return inode_setattr(inode, attr);
121}
122
123
124static int
125spufs_new_file(struct super_block *sb, struct dentry *dentry,
126 struct file_operations *fops, int mode,
127 struct spu_context *ctx)
128{
129 static struct inode_operations spufs_file_iops = {
130 .getattr = simple_getattr,
131 .setattr = spufs_setattr,
132 .unlink = simple_unlink,
133 };
134 struct inode *inode;
135 int ret;
136
137 ret = -ENOSPC;
138 inode = spufs_new_inode(sb, S_IFREG | mode);
139 if (!inode)
140 goto out;
141
142 ret = 0;
143 inode->i_op = &spufs_file_iops;
144 inode->i_fop = fops;
145 inode->u.generic_ip = SPUFS_I(inode)->i_ctx = get_spu_context(ctx);
146 d_add(dentry, inode);
147out:
148 return ret;
149}
150
151static void
152spufs_delete_inode(struct inode *inode)
153{
154 if (SPUFS_I(inode)->i_ctx)
155 put_spu_context(SPUFS_I(inode)->i_ctx);
156 clear_inode(inode);
157}
158
159static int
160spufs_fill_dir(struct dentry *dir, struct tree_descr *files,
161 int mode, struct spu_context *ctx)
162{
163 struct dentry *dentry;
164 int ret;
165
166 while (files->name && files->name[0]) {
167 ret = -ENOMEM;
168 dentry = d_alloc_name(dir, files->name);
169 if (!dentry)
170 goto out;
171 ret = spufs_new_file(dir->d_sb, dentry, files->ops,
172 files->mode & mode, ctx);
173 if (ret)
174 goto out;
175 files++;
176 }
177 return 0;
178out:
179 // FIXME: remove all files that are left
180
181 return ret;
182}
183
184static int spufs_rmdir(struct inode *root, struct dentry *dir_dentry)
185{
186 struct dentry *dentry;
187 int err;
188
189 spin_lock(&dcache_lock);
190 /* remove all entries */
191 err = 0;
192 list_for_each_entry(dentry, &dir_dentry->d_subdirs, d_child) {
193 if (d_unhashed(dentry) || !dentry->d_inode)
194 continue;
195 atomic_dec(&dentry->d_count);
196 spin_lock(&dentry->d_lock);
197 __d_drop(dentry);
198 spin_unlock(&dentry->d_lock);
199 }
200 spin_unlock(&dcache_lock);
201 if (!err) {
202 shrink_dcache_parent(dir_dentry);
203 err = simple_rmdir(root, dir_dentry);
204 }
205 return err;
206}
207
208static int spufs_dir_close(struct inode *inode, struct file *file)
209{
210 struct inode *dir;
211 struct dentry *dentry;
212 int ret;
213
214 dentry = file->f_dentry;
215 dir = dentry->d_parent->d_inode;
216 down(&dir->i_sem);
217 ret = spufs_rmdir(dir, file->f_dentry);
218 WARN_ON(ret);
219 up(&dir->i_sem);
220 return dcache_dir_close(inode, file);
221}
222
223struct inode_operations spufs_dir_inode_operations = {
224 .lookup = simple_lookup,
225};
226
227struct file_operations spufs_autodelete_dir_operations = {
228 .open = dcache_dir_open,
229 .release = spufs_dir_close,
230 .llseek = dcache_dir_lseek,
231 .read = generic_read_dir,
232 .readdir = dcache_readdir,
233 .fsync = simple_sync_file,
234};
235
236static int
237spufs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
238{
239 int ret;
240 struct inode *inode;
241 struct spu_context *ctx;
242
243 ret = -ENOSPC;
244 inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR);
245 if (!inode)
246 goto out;
247
248 if (dir->i_mode & S_ISGID) {
249 inode->i_gid = dir->i_gid;
250 inode->i_mode &= S_ISGID;
251 }
252 ctx = alloc_spu_context();
253 SPUFS_I(inode)->i_ctx = ctx;
254 if (!ctx)
255 goto out_iput;
256
257 inode->i_op = &spufs_dir_inode_operations;
258 inode->i_fop = &simple_dir_operations;
259 ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx);
260 if (ret)
261 goto out_free_ctx;
262
263 d_instantiate(dentry, inode);
264 dget(dentry);
265 dir->i_nlink++;
266 goto out;
267
268out_free_ctx:
269 put_spu_context(ctx);
270out_iput:
271 iput(inode);
272out:
273 return ret;
274}
275
276long
277spufs_create_thread(struct nameidata *nd, const char *name,
278 unsigned int flags, mode_t mode)
279{
280 struct dentry *dentry;
281 struct file *filp;
282 int ret;
283
284 /* need to be at the root of spufs */
285 ret = -EINVAL;
286 if (nd->dentry->d_sb->s_magic != SPUFS_MAGIC ||
287 nd->dentry != nd->dentry->d_sb->s_root)
288 goto out;
289
290 dentry = lookup_create(nd, 1);
291 ret = PTR_ERR(dentry);
292 if (IS_ERR(dentry))
293 goto out_dir;
294
295 ret = -EEXIST;
296 if (dentry->d_inode)
297 goto out_dput;
298
299 mode &= ~current->fs->umask;
300 ret = spufs_mkdir(nd->dentry->d_inode, dentry, mode & S_IRWXUGO);
301 if (ret)
302 goto out_dput;
303
304 ret = get_unused_fd();
305 if (ret < 0)
306 goto out_dput;
307
308 dentry->d_inode->i_nlink++;
309
310 filp = filp_open(name, O_RDONLY, mode);
311 if (IS_ERR(filp)) {
312 // FIXME: remove directory again
313 put_unused_fd(ret);
314 ret = PTR_ERR(filp);
315 } else {
316 filp->f_op = &spufs_autodelete_dir_operations;
317 fd_install(ret, filp);
318 }
319
320out_dput:
321 dput(dentry);
322out_dir:
323 up(&nd->dentry->d_inode->i_sem);
324out:
325 return ret;
326}
327
328/* File system initialization */
329enum {
330 Opt_uid, Opt_gid, Opt_err,
331};
332
333static match_table_t spufs_tokens = {
334 { Opt_uid, "uid=%d" },
335 { Opt_gid, "gid=%d" },
336 { Opt_err, NULL },
337};
338
339static int
340spufs_parse_options(char *options, struct inode *root)
341{
342 char *p;
343 substring_t args[MAX_OPT_ARGS];
344
345 while ((p = strsep(&options, ",")) != NULL) {
346 int token, option;
347
348 if (!*p)
349 continue;
350
351 token = match_token(p, spufs_tokens, args);
352 switch (token) {
353 case Opt_uid:
354 if (match_int(&args[0], &option))
355 return 0;
356 root->i_uid = option;
357 break;
358 case Opt_gid:
359 if (match_int(&args[0], &option))
360 return 0;
361 root->i_gid = option;
362 break;
363 default:
364 return 0;
365 }
366 }
367 return 1;
368}
369
370static int
371spufs_create_root(struct super_block *sb, void *data) {
372 struct inode *inode;
373 int ret;
374
375 ret = -ENOMEM;
376 inode = spufs_new_inode(sb, S_IFDIR | 0775);
377 if (!inode)
378 goto out;
379
380 inode->i_op = &spufs_dir_inode_operations;
381 inode->i_fop = &simple_dir_operations;
382 SPUFS_I(inode)->i_ctx = NULL;
383
384 ret = -EINVAL;
385 if (!spufs_parse_options(data, inode))
386 goto out_iput;
387
388 ret = -ENOMEM;
389 sb->s_root = d_alloc_root(inode);
390 if (!sb->s_root)
391 goto out_iput;
392
393 return 0;
394out_iput:
395 iput(inode);
396out:
397 return ret;
398}
399
400static int
401spufs_fill_super(struct super_block *sb, void *data, int silent)
402{
403 static struct super_operations s_ops = {
404 .alloc_inode = spufs_alloc_inode,
405 .destroy_inode = spufs_destroy_inode,
406 .statfs = simple_statfs,
407 .delete_inode = spufs_delete_inode,
408 .drop_inode = generic_delete_inode,
409 };
410
411 sb->s_maxbytes = MAX_LFS_FILESIZE;
412 sb->s_blocksize = PAGE_CACHE_SIZE;
413 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
414 sb->s_magic = SPUFS_MAGIC;
415 sb->s_op = &s_ops;
416
417 return spufs_create_root(sb, data);
418}
419
420static struct super_block *
421spufs_get_sb(struct file_system_type *fstype, int flags,
422 const char *name, void *data)
423{
424 return get_sb_single(fstype, flags, data, spufs_fill_super);
425}
426
427static struct file_system_type spufs_type = {
428 .owner = THIS_MODULE,
429 .name = "spufs",
430 .get_sb = spufs_get_sb,
431 .kill_sb = kill_litter_super,
432};
433
434static int spufs_init(void)
435{
436 int ret;
437 ret = -ENOMEM;
438 spufs_inode_cache = kmem_cache_create("spufs_inode_cache",
439 sizeof(struct spufs_inode_info), 0,
440 SLAB_HWCACHE_ALIGN, spufs_init_once, NULL);
441
442 if (!spufs_inode_cache)
443 goto out;
444 ret = register_filesystem(&spufs_type);
445 if (ret)
446 goto out_cache;
447 ret = register_spu_syscalls(&spufs_calls);
448 if (ret)
449 goto out_fs;
450 return 0;
451out_fs:
452 unregister_filesystem(&spufs_type);
453out_cache:
454 kmem_cache_destroy(spufs_inode_cache);
455out:
456 return ret;
457}
458module_init(spufs_init);
459
460static void spufs_exit(void)
461{
462 unregister_spu_syscalls(&spufs_calls);
463 unregister_filesystem(&spufs_type);
464 kmem_cache_destroy(spufs_inode_cache);
465}
466module_exit(spufs_exit);
467
468MODULE_LICENSE("GPL");
469MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
470
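spufs registers as filesystem type "spufs" and understands uid= and gid= mount options (see spufs_parse_options above). A user-space sketch of mounting it, with /spu as an assumed mount point:

/* user-space sketch: mount spufs at an assumed /spu mount point */
#include <stdio.h>
#include <sys/mount.h>

static int mount_spufs(void)
{
	/* "uid=" and "gid=" are the only options parsed by spufs */
	if (mount("none", "/spu", "spufs", 0, "uid=0,gid=0") != 0) {
		perror("mount spufs");
		return -1;
	}
	return 0;
}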
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
new file mode 100644
index 000000000000..b37fe797ea1c
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -0,0 +1,71 @@
1/*
2 * SPU file system
3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5 *
6 * Author: Arnd Bergmann <arndb@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22#ifndef SPUFS_H
23#define SPUFS_H
24
25#include <linux/kref.h>
26#include <linux/rwsem.h>
27#include <linux/spinlock.h>
28#include <linux/fs.h>
29
30#include <asm/spu.h>
31
32/* The magic number for our file system */
33enum {
34 SPUFS_MAGIC = 0x23c9b64e,
35};
36
37struct spu_context {
38 struct spu *spu; /* pointer to a physical SPU */
39 struct rw_semaphore backing_sema; /* protects the above */
40 spinlock_t mmio_lock; /* protects mmio access */
41
42 struct kref kref;
43};
44
45struct spufs_inode_info {
46 struct spu_context *i_ctx;
47 struct inode vfs_inode;
48};
49#define SPUFS_I(inode) \
50 container_of(inode, struct spufs_inode_info, vfs_inode)
51
52extern struct tree_descr spufs_dir_contents[];
53
54/* system call implementation */
55long spufs_run_spu(struct file *file,
56 struct spu_context *ctx, u32 *npc, u32 *status);
57long spufs_create_thread(struct nameidata *nd, const char *name,
58 unsigned int flags, mode_t mode);
59
60/* context management */
61struct spu_context * alloc_spu_context(void);
62void destroy_spu_context(struct kref *kref);
63struct spu_context * get_spu_context(struct spu_context *ctx);
64int put_spu_context(struct spu_context *ctx);
65
66void spu_acquire(struct spu_context *ctx);
67void spu_release(struct spu_context *ctx);
68void spu_acquire_runnable(struct spu_context *ctx);
69void spu_acquire_saved(struct spu_context *ctx);
70
71#endif
diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c
new file mode 100644
index 000000000000..3f71bb5e9d8e
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/syscalls.c
@@ -0,0 +1,106 @@
1#include <linux/file.h>
2#include <linux/fs.h>
3#include <linux/module.h>
4#include <linux/mount.h>
5#include <linux/namei.h>
6
7#include <asm/uaccess.h>
8
9#include "spufs.h"
10
11/**
12 * sys_spu_run - run code loaded into an SPU
13 *
14 * @unpc: next program counter for the SPU
15 * @ustatus: status of the SPU
16 *
17 * This system call transfers the control of execution of a
18 * user space thread to an SPU. It will return when the
19 * SPU has finished executing or when it hits an error
20 * condition and it will be interrupted if a signal needs
21 * to be delivered to a handler in user space.
22 *
23 * The next program counter is set to the passed value
24 * before the SPU starts fetching code and the user space
25 * pointer gets updated with the new value when returning
26 * from kernel space.
27 *
28 * The status value returned from spu_run reflects the
29 * value of the spu_status register after the SPU has stopped.
30 *
31 */
32long do_spu_run(struct file *filp, __u32 __user *unpc, __u32 __user *ustatus)
33{
34 long ret;
35 struct spufs_inode_info *i;
36 u32 npc, status;
37
38 ret = -EFAULT;
39 if (get_user(npc, unpc))
40 goto out;
41
42 ret = -EINVAL;
43 if (filp->f_vfsmnt->mnt_sb->s_magic != SPUFS_MAGIC)
44 goto out;
45
46 i = SPUFS_I(filp->f_dentry->d_inode);
47 ret = spufs_run_spu(filp, i->i_ctx, &npc, &status);
48
49 if (ret ==-EAGAIN || ret == -EIO)
50 ret = status;
51
52 if (put_user(npc, unpc))
53 ret = -EFAULT;
54
55 if (ustatus && put_user(status, ustatus))
56 ret = -EFAULT;
57out:
58 return ret;
59}
60
61#ifndef MODULE
62asmlinkage long sys_spu_run(int fd, __u32 __user *unpc, __u32 __user *ustatus)
63{
64 int fput_needed;
65 struct file *filp;
66 long ret;
67
68 ret = -EBADF;
69 filp = fget_light(fd, &fput_needed);
70 if (filp) {
71 ret = do_spu_run(filp, unpc, ustatus);
72 fput_light(filp, fput_needed);
73 }
74
75 return ret;
76}
77#endif
78
79asmlinkage long sys_spu_create(const char __user *pathname,
80 unsigned int flags, mode_t mode)
81{
82 char *tmp;
83 int ret;
84
85 tmp = getname(pathname);
86 ret = PTR_ERR(tmp);
87 if (!IS_ERR(tmp)) {
88 struct nameidata nd;
89
90 ret = path_lookup(tmp, LOOKUP_PARENT|
91 LOOKUP_OPEN|LOOKUP_CREATE, &nd);
92 if (!ret) {
93 ret = spufs_create_thread(&nd, pathname, flags, mode);
94 path_release(&nd);
95 }
96 putname(tmp);
97 }
98
99 return ret;
100}
101
102struct spufs_calls spufs_calls = {
103 .create_thread = sys_spu_create,
104 .spu_run = do_spu_run,
105 .owner = THIS_MODULE,
106};
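Taken together, sys_spu_create() returns a file descriptor on the newly created context directory, and sys_spu_run() takes that same descriptor to hand the calling thread over to the SPU. A user-space sketch of the whole flow; the /spu mount point, the pathname and the syscall numbers from systbl.S above are assumptions of the example, not requirements of the patch:

/* end-to-end user-space sketch; illustration only */
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

#define __NR_spu_run	322	/* from arch/powerpc/kernel/systbl.S above */
#define __NR_spu_create	323

static int run_one_spu_context(void)
{
	uint32_t npc = 0, status = 0;
	long ctx_fd, ret;

	/* creates the directory /spu/hello and returns an fd on it */
	ctx_fd = syscall(__NR_spu_create, "/spu/hello", 0, 0755);
	if (ctx_fd < 0)
		return -1;

	/* the SPU program would be copied into /spu/hello/mem here */

	/* transfers control to the SPU until it stops, halts or is signalled */
	ret = syscall(__NR_spu_run, (int)ctx_fd, &npc, &status);

	close(ctx_fd);	/* spufs_dir_close() removes the directory again */
	return (int)ret;
}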