author	Arnd Bergmann <arnd@arndb.de>	2005-11-15 15:53:48 -0500
committer	Paul Mackerras <paulus@samba.org>	2006-01-08 22:49:12 -0500
commit	67207b9664a8d603138ef1556141e6d0a102bea7 (patch)
tree	e98886778be65aeb6625a5f516873bbc5beeb978	/arch/powerpc/platforms/cell/spu_base.c
parent	d7a301033f1990188f65abf4fe8e5b90ef0e3888 (diff)
[PATCH] spufs: The SPU file system, base
This is the current version of the spu file system, used for driving SPEs on the Cell Broadband Engine.

This release is almost identical to the version for the 2.6.14 kernel posted earlier, which is available as part of the Cell BE Linux distribution from http://www.bsc.es/projects/deepcomputing/linuxoncell/.

The first patch provides all the interfaces for running SPU applications, but does not have any support for debugging SPU tasks or for scheduling. Both these functionalities are added in the subsequent patches.

See Documentation/filesystems/spufs.txt on how to use spufs.

Signed-off-by: Arnd Bergmann <arndb@de.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/platforms/cell/spu_base.c')
-rw-r--r--	arch/powerpc/platforms/cell/spu_base.c	740
1 file changed, 740 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
new file mode 100644
index 00000000000..9e9096590a0
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -0,0 +1,740 @@
/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define DEBUG 1

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <asm/io.h>
#include <asm/prom.h>
#include <asm/semaphore.h>
#include <asm/spu.h>
#include <asm/mmu_context.h>

#include "interrupt.h"

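/*
 * The SPE raises three classes of interrupts: class 0 for errors
 * (invalid MFC DMA, DMA alignment, SPU error), class 1 for memory
 * management (SLB segment and hash table faults of the MFC) and
 * class 2 for mailbox, stop-and-signal and DMA completion events.
 * The handlers below deal with one event each.
 */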
static int __spu_trap_invalid_dma(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	force_sig(SIGBUS, /* info, */ current);
	return 0;
}

static int __spu_trap_dma_align(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	force_sig(SIGBUS, /* info, */ current);
	return 0;
}

static int __spu_trap_error(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	force_sig(SIGILL, /* info, */ current);
	return 0;
}

static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}

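/*
 * The MFC has eight SLB entries for translating effective addresses
 * of its DMA transfers.  On an SLB miss, install a new entry using a
 * simple round-robin replacement policy (slb_replace wraps at eight),
 * then restart the suspended DMA.
 */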
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct spu_priv2 __iomem *priv2;
	struct mm_struct *mm;

	pr_debug("%s\n", __FUNCTION__);

	if (REGION_ID(ea) != USER_REGION_ID) {
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}

	priv2 = spu->priv2;
	mm = spu->mm;

	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	out_be64(&priv2->slb_index_W, spu->slb_replace);
	out_be64(&priv2->slb_vsid_RW,
		(get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT)
							| SLB_VSID_USER);
	out_be64(&priv2->slb_esid_RW, (ea & ESID_MASK) | SLB_ESID_V);

	spu_restart_dma(spu);

	pr_debug("set slb %d context %lx, ea %016lx, vsid %016lx, esid %016lx\n",
		spu->slb_replace, mm->context.id, ea,
		(get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) | SLB_VSID_USER,
		(ea & ESID_MASK) | SLB_ESID_V);
	return 0;
}

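/*
 * A hash table miss cannot be resolved from interrupt context; just
 * read the fault status and wake up the thread sleeping in spu_run(),
 * which calls spu_handle_pte_fault() in process context.
 */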
static int __spu_trap_data_map(struct spu *spu, unsigned long ea)
{
	unsigned long dsisr;
	struct spu_priv1 __iomem *priv1;

	pr_debug("%s\n", __FUNCTION__);
	priv1 = spu->priv1;
	dsisr = in_be64(&priv1->mfc_dsisr_RW);

	wake_up(&spu->stop_wq);

	return 0;
}

static int __spu_trap_mailbox(struct spu *spu)
{
	wake_up_all(&spu->ibox_wq);
	kill_fasync(&spu->ibox_fasync, SIGIO, POLLIN);

	/* atomically disable SPU mailbox interrupts */
	spin_lock(&spu->register_lock);
	out_be64(&spu->priv1->int_mask_class2_RW,
		in_be64(&spu->priv1->int_mask_class2_RW) & ~0x1);
	spin_unlock(&spu->register_lock);
	return 0;
}

static int __spu_trap_stop(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->stop_code = in_be32(&spu->problem->spu_status_R);
	wake_up(&spu->stop_wq);
	return 0;
}

static int __spu_trap_halt(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->stop_code = in_be32(&spu->problem->spu_status_R);
	wake_up(&spu->stop_wq);
	return 0;
}

static int __spu_trap_tag_group(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	/* wake_up(&spu->dma_wq); */
	return 0;
}

static int __spu_trap_spubox(struct spu *spu)
{
	wake_up_all(&spu->wbox_wq);
	kill_fasync(&spu->wbox_fasync, SIGIO, POLLOUT);

	/* atomically disable SPU mailbox interrupts */
	spin_lock(&spu->register_lock);
	out_be64(&spu->priv1->int_mask_class2_RW,
		in_be64(&spu->priv1->int_mask_class2_RW) & ~0x10);
	spin_unlock(&spu->register_lock);
	return 0;
}

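/*
 * Class 0 (error) interrupts are only flagged here; the status bits
 * are read and acknowledged by spu_irq_class_0_bottom(), called from
 * spu_run() in process context.
 */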
static irqreturn_t
spu_irq_class_0(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;

	spu = data;
	spu->class_0_pending = 1;
	wake_up(&spu->stop_wq);

	return IRQ_HANDLED;
}

static int
spu_irq_class_0_bottom(struct spu *spu)
{
	unsigned long stat;

	spu->class_0_pending = 0;

	stat = in_be64(&spu->priv1->int_stat_class0_RW);

	if (stat & 1) /* invalid MFC DMA */
		__spu_trap_invalid_dma(spu);

	if (stat & 2) /* invalid DMA alignment */
		__spu_trap_dma_align(spu);

	if (stat & 4) /* error on SPU */
		__spu_trap_error(spu);

	out_be64(&spu->priv1->int_stat_class0_RW, stat);
	return 0;
}

static irqreturn_t
spu_irq_class_1(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;
	unsigned long stat, dar;

	spu = data;
	stat = in_be64(&spu->priv1->int_stat_class1_RW);
	dar = in_be64(&spu->priv1->mfc_dar_RW);

	if (stat & 1) /* segment fault */
		__spu_trap_data_seg(spu, dar);

	if (stat & 2) { /* mapping fault */
		__spu_trap_data_map(spu, dar);
	}

	if (stat & 4) /* ls compare & suspend on get */
		;

	if (stat & 8) /* ls compare & suspend on put */
		;

	out_be64(&spu->priv1->int_stat_class1_RW, stat);
	return stat ? IRQ_HANDLED : IRQ_NONE;
}

static irqreturn_t
spu_irq_class_2(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;
	unsigned long stat;

	spu = data;
	stat = in_be64(&spu->priv1->int_stat_class2_RW);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat,
		in_be64(&spu->priv1->int_mask_class2_RW));

	if (stat & 1) /* PPC core mailbox */
		__spu_trap_mailbox(spu);

	if (stat & 2) /* SPU stop-and-signal */
		__spu_trap_stop(spu);

	if (stat & 4) /* SPU halted */
		__spu_trap_halt(spu);

	if (stat & 8) /* DMA tag group complete */
		__spu_trap_tag_group(spu);

	if (stat & 0x10) /* SPU mailbox threshold */
		__spu_trap_spubox(spu);

	out_be64(&spu->priv1->int_stat_class2_RW, stat);
	return stat ? IRQ_HANDLED : IRQ_NONE;
}

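/*
 * Each SPE owns three consecutive interrupt lines in the internal
 * interrupt controller, one per class: the line for class n is at
 * IIC_NODE_STRIDE * node + IIC_SPE_OFFSET + n * IIC_CLASS_STRIDE,
 * offset by the per-SPE interrupt source (isrc) from the device tree.
 */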
static int
spu_request_irqs(struct spu *spu)
{
	int ret;
	int irq_base;

	irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;

	snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0", spu->number);
	ret = request_irq(irq_base + spu->isrc,
		 spu_irq_class_0, 0, spu->irq_c0, spu);
	if (ret)
		goto out;
	out_be64(&spu->priv1->int_mask_class0_RW, 0x7);

	snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", spu->number);
	ret = request_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc,
		 spu_irq_class_1, 0, spu->irq_c1, spu);
	if (ret)
		goto out1;
	out_be64(&spu->priv1->int_mask_class1_RW, 0x3);

	snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", spu->number);
	ret = request_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc,
		 spu_irq_class_2, 0, spu->irq_c2, spu);
	if (ret)
		goto out2;
	out_be64(&spu->priv1->int_mask_class2_RW, 0xe);
	goto out;

out2:
	free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
out1:
	free_irq(irq_base + spu->isrc, spu);
out:
	return ret;
}

static void
spu_free_irqs(struct spu *spu)
{
	int irq_base;

	irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;

	free_irq(irq_base + spu->isrc, spu);
	free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
	free_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc, spu);
}

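/*
 * All currently unused SPUs sit on spu_list, protected by spu_mutex.
 * spu_alloc() and spu_free() implement a simple allocator over that
 * list; an actual scheduler is added by a subsequent patch.
 */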
static LIST_HEAD(spu_list);
static DECLARE_MUTEX(spu_mutex);

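/*
 * Bring the SPU channels into a known state: select each channel
 * through spu_chnlcntptr_RW, drain its data by writing zeroes, then
 * set the channel counts to their initial values.
 */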
static void spu_init_channels(struct spu *spu)
{
	static const struct {
		 unsigned channel;
		 unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}

static void spu_init_regs(struct spu *spu)
{
	out_be64(&spu->priv1->int_mask_class0_RW, 0x7);
	out_be64(&spu->priv1->int_mask_class1_RW, 0x3);
	out_be64(&spu->priv1->int_mask_class2_RW, 0xe);
}

struct spu *spu_alloc(void)
{
	struct spu *spu;

	down(&spu_mutex);
	if (!list_empty(&spu_list)) {
		spu = list_entry(spu_list.next, struct spu, list);
		list_del_init(&spu->list);
		pr_debug("Got SPU %x %d\n", spu->isrc, spu->number);
	} else {
		pr_debug("No SPU left\n");
		spu = NULL;
	}
	up(&spu_mutex);

	if (spu) {
		spu_init_channels(spu);
		spu_init_regs(spu);
	}

	return spu;
}
EXPORT_SYMBOL(spu_alloc);

void spu_free(struct spu *spu)
{
	down(&spu_mutex);
	spu->ibox_fasync = NULL;
	spu->wbox_fasync = NULL;
	list_add_tail(&spu->list, &spu_list);
	up(&spu_mutex);
}
EXPORT_SYMBOL(spu_free);

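/*
 * Resolve an MFC fault like a regular CPU page fault: look up the VMA
 * for the faulting effective address, check access permissions and
 * hand the final resolution to handle_mm_fault().
 */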
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
static int spu_handle_mm_fault(struct spu *spu)
{
	struct spu_priv1 __iomem *priv1;
	struct mm_struct *mm = spu->mm;
	struct vm_area_struct *vma;
	u64 ea, dsisr, is_write;
	int ret;

	priv1 = spu->priv1;
	ea = in_be64(&priv1->mfc_dar_RW);
	dsisr = in_be64(&priv1->mfc_dsisr_RW);
#if 0
	if (!IS_VALID_EA(ea)) {
		return -EFAULT;
	}
#endif /* XXX */
	if (mm == NULL) {
		return -EFAULT;
	}
	if (mm->pgd == NULL) {
		return -EFAULT;
	}

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ea);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= ea)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
#if 0
	if (expand_stack(vma, ea))
		goto bad_area;
#endif /* XXX */
good_area:
	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (dsisr & MFC_DSISR_ACCESS_DENIED)
			goto bad_area;
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	ret = 0;
	switch (handle_mm_fault(mm, vma, ea, is_write)) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		ret = -EFAULT;
		goto bad_area;
	case VM_FAULT_OOM:
		ret = -ENOMEM;
		goto bad_area;
	default:
		BUG();
	}
	up_read(&mm->mmap_sem);
	return ret;

bad_area:
	up_read(&mm->mmap_sem);
	return -EFAULT;
}

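/*
 * First try to insert the translation into the hash table directly
 * via hash_page(); only when that fails, or when access was denied,
 * fall back to the full mm fault path above.  If the fault could be
 * resolved, restart the suspended DMA transfer.
 */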
static int spu_handle_pte_fault(struct spu *spu)
{
	struct spu_priv1 __iomem *priv1;
	u64 ea, dsisr, access, error = 0UL;
	int ret = 0;

	priv1 = spu->priv1;
	ea = in_be64(&priv1->mfc_dar_RW);
	dsisr = in_be64(&priv1->mfc_dsisr_RW);
	access = (_PAGE_PRESENT | _PAGE_USER);
	if (dsisr & MFC_DSISR_PTE_NOT_FOUND) {
		if (hash_page(ea, access, 0x300) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
	}
	if ((error & CLASS1_ENABLE_STORAGE_FAULT_INTR) ||
	    (dsisr & MFC_DSISR_ACCESS_DENIED)) {
		if ((ret = spu_handle_mm_fault(spu)) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		else
			error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR;
	}
	if (!error)
		spu_restart_dma(spu);

	return ret;
}

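/*
 * Start the SPU and sleep until it stops for any reason: a
 * stop-and-signal or halt instruction, a page fault that must be
 * handled in process context, a pending class 0 error, or a signal
 * for the current thread.  Page faults and class 0 errors are handled
 * here and the SPU continues; everything else terminates the loop.
 */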
int spu_run(struct spu *spu)
{
	struct spu_problem __iomem *prob;
	struct spu_priv1 __iomem *priv1;
	struct spu_priv2 __iomem *priv2;
	unsigned long status;
	int ret;

	prob = spu->problem;
	priv1 = spu->priv1;
	priv2 = spu->priv2;

	/* Let SPU run.  */
	spu->mm = current->mm;
	eieio();
	out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);

	do {
		ret = wait_event_interruptible(spu->stop_wq,
			  (!((status = in_be32(&prob->spu_status_R)) & 0x1))
			|| (in_be64(&priv1->mfc_dsisr_RW) & MFC_DSISR_PTE_NOT_FOUND)
			|| spu->class_0_pending);

		if (status & SPU_STATUS_STOPPED_BY_STOP)
			ret = -EAGAIN;
		else if (status & SPU_STATUS_STOPPED_BY_HALT)
			ret = -EIO;
		else if (in_be64(&priv1->mfc_dsisr_RW) & MFC_DSISR_PTE_NOT_FOUND)
			ret = spu_handle_pte_fault(spu);

		if (spu->class_0_pending)
			spu_irq_class_0_bottom(spu);

		if (!ret && signal_pending(current))
			ret = -ERESTARTSYS;

	} while (!ret);

	/* Ensure SPU is stopped.  */
	out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
	eieio();
	while (in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)
		cpu_relax();

	out_be64(&priv2->slb_invalidate_all_W, 0);
	out_be64(&priv1->tlb_invalidate_entry_W, 0UL);
	eieio();

	spu->mm = NULL;

	/* Check for SPU breakpoint.  */
	if (unlikely(current->ptrace & PT_PTRACED)) {
		status = in_be32(&prob->spu_status_R);

		if ((status & SPU_STATUS_STOPPED_BY_STOP)
		    && status >> SPU_STOP_STATUS_SHIFT == 0x3fff) {
			force_sig(SIGTRAP, current);
			ret = -ERESTARTSYS;
		}
	}

	return ret;
}
EXPORT_SYMBOL(spu_run);

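/*
 * The device tree describes each register area as a property holding
 * an (address, length) pair; map it into kernel virtual address space
 * with ioremap().
 */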
static void __iomem * __init map_spe_prop(struct device_node *n,
		const char *name)
{
	struct address_prop {
		unsigned long address;
		unsigned int len;
	} __attribute__((packed)) *prop;

	void *p;
	int proplen;

	p = get_property(n, name, &proplen);
	if (proplen != sizeof (struct address_prop))
		return NULL;

	prop = p;

	return ioremap(prop->address, prop->len);
}

static void spu_unmap(struct spu *spu)
{
	iounmap(spu->priv2);
	iounmap(spu->priv1);
	iounmap(spu->problem);
	iounmap((u8 __iomem *)spu->local_store);
}

static int __init spu_map_device(struct spu *spu, struct device_node *spe)
{
	char *prop;
	int ret;

	ret = -ENODEV;
	prop = get_property(spe, "isrc", NULL);
	if (!prop)
		goto out;
	spu->isrc = *(unsigned int *)prop;

	spu->name = get_property(spe, "name", NULL);
	if (!spu->name)
		goto out;

	prop = get_property(spe, "local-store", NULL);
	if (!prop)
		goto out;
	spu->local_store_phys = *(unsigned long *)prop;

	/* we use local store as ram, not io memory */
	spu->local_store = (void __force *)map_spe_prop(spe, "local-store");
	if (!spu->local_store)
		goto out;

	spu->problem = map_spe_prop(spe, "problem");
	if (!spu->problem)
		goto out_unmap;

	spu->priv1 = map_spe_prop(spe, "priv1");
	if (!spu->priv1)
		goto out_unmap;

	spu->priv2 = map_spe_prop(spe, "priv2");
	if (!spu->priv2)
		goto out_unmap;
	ret = 0;
	goto out;

out_unmap:
	spu_unmap(spu);
out:
	return ret;
}

static int __init find_spu_node_id(struct device_node *spe)
{
	unsigned int *id;
	struct device_node *cpu;

	cpu = spe->parent->parent;
	id = (unsigned int *)get_property(cpu, "node-id", NULL);

	return id ? *id : 0;
}

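/*
 * Map the register areas of one SPE, set up its initial state,
 * request its interrupt lines and add it to the global list of
 * available SPUs.  SPUs are numbered in the order they are found.
 */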
static int __init create_spu(struct device_node *spe)
{
	struct spu *spu;
	int ret;
	static int number;

	ret = -ENOMEM;
	spu = kmalloc(sizeof (*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	ret = spu_map_device(spu, spe);
	if (ret)
		goto out_free;

	spu->node = find_spu_node_id(spe);
	spu->stop_code = 0;
	spu->slb_replace = 0;
	spu->mm = NULL;
	spu->class_0_pending = 0;
	spin_lock_init(&spu->register_lock);

	out_be64(&spu->priv1->mfc_sdr_RW, mfspr(SPRN_SDR1));
	out_be64(&spu->priv1->mfc_sr1_RW, 0x33);

	init_waitqueue_head(&spu->stop_wq);
	init_waitqueue_head(&spu->wbox_wq);
	init_waitqueue_head(&spu->ibox_wq);

	spu->ibox_fasync = NULL;
	spu->wbox_fasync = NULL;

	down(&spu_mutex);
	spu->number = number++;
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_unmap;

	list_add(&spu->list, &spu_list);
	up(&spu_mutex);

	pr_debug("Using SPE %s %02x %p %p %p %p %d\n",
		spu->name, spu->isrc, spu->local_store,
		spu->problem, spu->priv1, spu->priv2, spu->number);
	goto out;

out_unmap:
	up(&spu_mutex);
	spu_unmap(spu);
out_free:
	kfree(spu);
out:
	return ret;
}

static void destroy_spu(struct spu *spu)
{
	list_del_init(&spu->list);

	spu_free_irqs(spu);
	spu_unmap(spu);
	kfree(spu);
}

static void cleanup_spu_base(void)
{
	struct spu *spu, *tmp;
	down(&spu_mutex);
	list_for_each_entry_safe(spu, tmp, &spu_list, list)
		destroy_spu(spu);
	up(&spu_mutex);
}
module_exit(cleanup_spu_base);

static int __init init_spu_base(void)
{
	struct device_node *node;
	int ret;

	ret = -ENODEV;
	for (node = of_find_node_by_type(NULL, "spe");
			node; node = of_find_node_by_type(node, "spe")) {
		ret = create_spu(node);
		if (ret) {
			printk(KERN_WARNING "%s: Error initializing %s\n",
				__FUNCTION__, node->name);
			cleanup_spu_base();
			break;
		}
	}
	/* in some old firmware versions, the spe is called 'spc', so we
	   look for that as well */
	for (node = of_find_node_by_type(NULL, "spc");
			node; node = of_find_node_by_type(node, "spc")) {
		ret = create_spu(node);
		if (ret) {
			printk(KERN_WARNING "%s: Error initializing %s\n",
				__FUNCTION__, node->name);
			cleanup_spu_base();
			break;
		}
	}
	return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");