aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorGeoff Levand <geoffrey.levand@am.sony.com>2006-11-22 18:46:49 -0500
committerPaul Mackerras <paulus@samba.org>2006-12-04 04:40:39 -0500
commite28b003136b5b2f10c25b49c32df9b7742550c23 (patch)
tree86d629c9dc08567c5431b07883c1e860da550df7
parente34226d2cd443a67f46fc531e3a6bc6e03843ce2 (diff)
[POWERPC] cell: abstract spu management routines
This adds a platform specific spu management abstraction and the corresponding routines to support the IBM Cell Blade. It also removes the hypervisor-only resources that were included in struct spu. Three new platform specific routines are introduced, spu_enumerate_spus(), spu_create_spu() and spu_destroy_spu(). The underlying design uses a new type, struct spu_management_ops, to hold function pointers that the platform setup code is expected to initialize to instances appropriate to that platform. For the IBM Cell Blade support, I put the hypervisor-only resources that were in struct spu into a platform specific data structure struct spu_pdata. Signed-off-by: Geoff Levand <geoffrey.levand@am.sony.com> Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
-rw-r--r--arch/powerpc/platforms/cell/cbe_thermal.c5
-rw-r--r--arch/powerpc/platforms/cell/setup.c3
-rw-r--r--arch/powerpc/platforms/cell/spu_base.c334
-rw-r--r--arch/powerpc/platforms/cell/spu_priv1_mmio.c424
-rw-r--r--arch/powerpc/platforms/cell/spu_priv1_mmio.h26
-rw-r--r--include/asm-powerpc/spu.h5
-rw-r--r--include/asm-powerpc/spu_priv1.h40
7 files changed, 490 insertions, 347 deletions
diff --git a/arch/powerpc/platforms/cell/cbe_thermal.c b/arch/powerpc/platforms/cell/cbe_thermal.c
index 17831a92d91e..616a0a3fd0e2 100644
--- a/arch/powerpc/platforms/cell/cbe_thermal.c
+++ b/arch/powerpc/platforms/cell/cbe_thermal.c
@@ -29,6 +29,7 @@
29#include <asm/prom.h> 29#include <asm/prom.h>
30 30
31#include "cbe_regs.h" 31#include "cbe_regs.h"
32#include "spu_priv1_mmio.h"
32 33
33static struct cbe_pmd_regs __iomem *get_pmd_regs(struct sys_device *sysdev) 34static struct cbe_pmd_regs __iomem *get_pmd_regs(struct sys_device *sysdev)
34{ 35{
@@ -36,7 +37,7 @@ static struct cbe_pmd_regs __iomem *get_pmd_regs(struct sys_device *sysdev)
36 37
37 spu = container_of(sysdev, struct spu, sysdev); 38 spu = container_of(sysdev, struct spu, sysdev);
38 39
39 return cbe_get_pmd_regs(spu->devnode); 40 return cbe_get_pmd_regs(spu_devnode(spu));
40} 41}
41 42
42/* returns the value for a given spu in a given register */ 43/* returns the value for a given spu in a given register */
@@ -49,7 +50,7 @@ static u8 spu_read_register_value(struct sys_device *sysdev, union spe_reg __iom
49 /* getting the id from the reg attribute will not work on future device-tree layouts 50 /* getting the id from the reg attribute will not work on future device-tree layouts
50 * in future we should store the id to the spu struct and use it here */ 51 * in future we should store the id to the spu struct and use it here */
51 spu = container_of(sysdev, struct spu, sysdev); 52 spu = container_of(sysdev, struct spu, sysdev);
52 id = (unsigned int *)get_property(spu->devnode, "reg", NULL); 53 id = (unsigned int *)get_property(spu_devnode(spu), "reg", NULL);
53 value.val = in_be64(&reg->val); 54 value.val = in_be64(&reg->val);
54 55
55 return value.spe[*id]; 56 return value.spe[*id];
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index 83d5d0c2fafd..36989c2eee66 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -145,7 +145,8 @@ static void __init cell_init_irq(void)
145static void __init cell_setup_arch(void) 145static void __init cell_setup_arch(void)
146{ 146{
147#ifdef CONFIG_SPU_BASE 147#ifdef CONFIG_SPU_BASE
148 spu_priv1_ops = &spu_priv1_mmio_ops; 148 spu_priv1_ops = &spu_priv1_mmio_ops;
149 spu_management_ops = &spu_management_of_ops;
149#endif 150#endif
150 151
151 cbe_regs_init(); 152 cbe_regs_init();
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index d4f4f396288f..841ed359802c 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -25,23 +25,17 @@
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/list.h> 26#include <linux/list.h>
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/pci.h>
29#include <linux/poll.h>
30#include <linux/ptrace.h> 28#include <linux/ptrace.h>
31#include <linux/slab.h> 29#include <linux/slab.h>
32#include <linux/wait.h> 30#include <linux/wait.h>
33 31#include <linux/mm.h>
34#include <asm/firmware.h> 32#include <linux/io.h>
35#include <asm/io.h>
36#include <asm/prom.h>
37#include <linux/mutex.h> 33#include <linux/mutex.h>
38#include <asm/spu.h> 34#include <asm/spu.h>
39#include <asm/spu_priv1.h> 35#include <asm/spu_priv1.h>
40#include <asm/mmu_context.h>
41#include <asm/xmon.h> 36#include <asm/xmon.h>
42 37
43#include "interrupt.h" 38const struct spu_management_ops *spu_management_ops;
44
45const struct spu_priv1_ops *spu_priv1_ops; 39const struct spu_priv1_ops *spu_priv1_ops;
46 40
47EXPORT_SYMBOL_GPL(spu_priv1_ops); 41EXPORT_SYMBOL_GPL(spu_priv1_ops);
@@ -512,261 +506,6 @@ int spu_irq_class_1_bottom(struct spu *spu)
512 return ret; 506 return ret;
513} 507}
514 508
515static int __init find_spu_node_id(struct device_node *spe)
516{
517 const unsigned int *id;
518 struct device_node *cpu;
519 cpu = spe->parent->parent;
520 id = get_property(cpu, "node-id", NULL);
521 return id ? *id : 0;
522}
523
524static int __init cell_spuprop_present(struct spu *spu, struct device_node *spe,
525 const char *prop)
526{
527 static DEFINE_MUTEX(add_spumem_mutex);
528
529 const struct address_prop {
530 unsigned long address;
531 unsigned int len;
532 } __attribute__((packed)) *p;
533 int proplen;
534
535 unsigned long start_pfn, nr_pages;
536 struct pglist_data *pgdata;
537 struct zone *zone;
538 int ret;
539
540 p = get_property(spe, prop, &proplen);
541 WARN_ON(proplen != sizeof (*p));
542
543 start_pfn = p->address >> PAGE_SHIFT;
544 nr_pages = ((unsigned long)p->len + PAGE_SIZE - 1) >> PAGE_SHIFT;
545
546 pgdata = NODE_DATA(spu->nid);
547 zone = pgdata->node_zones;
548
549 /* XXX rethink locking here */
550 mutex_lock(&add_spumem_mutex);
551 ret = __add_pages(zone, start_pfn, nr_pages);
552 mutex_unlock(&add_spumem_mutex);
553
554 return ret;
555}
556
557static void __iomem * __init map_spe_prop(struct spu *spu,
558 struct device_node *n, const char *name)
559{
560 const struct address_prop {
561 unsigned long address;
562 unsigned int len;
563 } __attribute__((packed)) *prop;
564
565 const void *p;
566 int proplen;
567 void __iomem *ret = NULL;
568 int err = 0;
569
570 p = get_property(n, name, &proplen);
571 if (proplen != sizeof (struct address_prop))
572 return NULL;
573
574 prop = p;
575
576 err = cell_spuprop_present(spu, n, name);
577 if (err && (err != -EEXIST))
578 goto out;
579
580 ret = ioremap(prop->address, prop->len);
581
582 out:
583 return ret;
584}
585
586static void spu_unmap(struct spu *spu)
587{
588 iounmap(spu->priv2);
589 iounmap(spu->priv1);
590 iounmap(spu->problem);
591 iounmap((__force u8 __iomem *)spu->local_store);
592}
593
594/* This function shall be abstracted for HV platforms */
595static int __init spu_map_interrupts_old(struct spu *spu, struct device_node *np)
596{
597 unsigned int isrc;
598 const u32 *tmp;
599
600 /* Get the interrupt source unit from the device-tree */
601 tmp = get_property(np, "isrc", NULL);
602 if (!tmp)
603 return -ENODEV;
604 isrc = tmp[0];
605
606 /* Add the node number */
607 isrc |= spu->node << IIC_IRQ_NODE_SHIFT;
608
609 /* Now map interrupts of all 3 classes */
610 spu->irqs[0] = irq_create_mapping(NULL, IIC_IRQ_CLASS_0 | isrc);
611 spu->irqs[1] = irq_create_mapping(NULL, IIC_IRQ_CLASS_1 | isrc);
612 spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc);
613
614 /* Right now, we only fail if class 2 failed */
615 return spu->irqs[2] == NO_IRQ ? -EINVAL : 0;
616}
617
618static int __init spu_map_device_old(struct spu *spu, struct device_node *node)
619{
620 const char *prop;
621 int ret;
622
623 ret = -ENODEV;
624 spu->name = get_property(node, "name", NULL);
625 if (!spu->name)
626 goto out;
627
628 prop = get_property(node, "local-store", NULL);
629 if (!prop)
630 goto out;
631 spu->local_store_phys = *(unsigned long *)prop;
632
633 /* we use local store as ram, not io memory */
634 spu->local_store = (void __force *)
635 map_spe_prop(spu, node, "local-store");
636 if (!spu->local_store)
637 goto out;
638
639 prop = get_property(node, "problem", NULL);
640 if (!prop)
641 goto out_unmap;
642 spu->problem_phys = *(unsigned long *)prop;
643
644 spu->problem= map_spe_prop(spu, node, "problem");
645 if (!spu->problem)
646 goto out_unmap;
647
648 spu->priv1= map_spe_prop(spu, node, "priv1");
649 /* priv1 is not available on a hypervisor */
650
651 spu->priv2= map_spe_prop(spu, node, "priv2");
652 if (!spu->priv2)
653 goto out_unmap;
654 ret = 0;
655 goto out;
656
657out_unmap:
658 spu_unmap(spu);
659out:
660 return ret;
661}
662
663static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
664{
665 struct of_irq oirq;
666 int ret;
667 int i;
668
669 for (i=0; i < 3; i++) {
670 ret = of_irq_map_one(np, i, &oirq);
671 if (ret) {
672 pr_debug("spu_new: failed to get irq %d\n", i);
673 goto err;
674 }
675 ret = -EINVAL;
676 pr_debug(" irq %d no 0x%x on %s\n", i, oirq.specifier[0],
677 oirq.controller->full_name);
678 spu->irqs[i] = irq_create_of_mapping(oirq.controller,
679 oirq.specifier, oirq.size);
680 if (spu->irqs[i] == NO_IRQ) {
681 pr_debug("spu_new: failed to map it !\n");
682 goto err;
683 }
684 }
685 return 0;
686
687err:
688 pr_debug("failed to map irq %x for spu %s\n", *oirq.specifier, spu->name);
689 for (; i >= 0; i--) {
690 if (spu->irqs[i] != NO_IRQ)
691 irq_dispose_mapping(spu->irqs[i]);
692 }
693 return ret;
694}
695
696static int spu_map_resource(struct device_node *node, int nr,
697 void __iomem** virt, unsigned long *phys)
698{
699 struct resource resource = { };
700 int ret;
701
702 ret = of_address_to_resource(node, nr, &resource);
703 if (ret)
704 goto out;
705
706 if (phys)
707 *phys = resource.start;
708 *virt = ioremap(resource.start, resource.end - resource.start);
709 if (!*virt)
710 ret = -EINVAL;
711
712out:
713 return ret;
714}
715
716static int __init spu_map_device(struct spu *spu, struct device_node *node)
717{
718 int ret = -ENODEV;
719 spu->name = get_property(node, "name", NULL);
720 if (!spu->name)
721 goto out;
722
723 ret = spu_map_resource(node, 0, (void __iomem**)&spu->local_store,
724 &spu->local_store_phys);
725 if (ret) {
726 pr_debug("spu_new: failed to map %s resource 0\n",
727 node->full_name);
728 goto out;
729 }
730 ret = spu_map_resource(node, 1, (void __iomem**)&spu->problem,
731 &spu->problem_phys);
732 if (ret) {
733 pr_debug("spu_new: failed to map %s resource 1\n",
734 node->full_name);
735 goto out_unmap;
736 }
737 ret = spu_map_resource(node, 2, (void __iomem**)&spu->priv2,
738 NULL);
739 if (ret) {
740 pr_debug("spu_new: failed to map %s resource 2\n",
741 node->full_name);
742 goto out_unmap;
743 }
744
745 if (!firmware_has_feature(FW_FEATURE_LPAR))
746 ret = spu_map_resource(node, 3, (void __iomem**)&spu->priv1,
747 NULL);
748 if (ret) {
749 pr_debug("spu_new: failed to map %s resource 3\n",
750 node->full_name);
751 goto out_unmap;
752 }
753 pr_debug("spu_new: %s maps:\n", node->full_name);
754 pr_debug(" local store : 0x%016lx -> 0x%p\n",
755 spu->local_store_phys, spu->local_store);
756 pr_debug(" problem state : 0x%016lx -> 0x%p\n",
757 spu->problem_phys, spu->problem);
758 pr_debug(" priv2 : 0x%p\n", spu->priv2);
759 pr_debug(" priv1 : 0x%p\n", spu->priv1);
760
761 return 0;
762
763out_unmap:
764 spu_unmap(spu);
765out:
766 pr_debug("failed to map spe %s: %d\n", spu->name, ret);
767 return ret;
768}
769
770struct sysdev_class spu_sysdev_class = { 509struct sysdev_class spu_sysdev_class = {
771 set_kset_name("spu") 510 set_kset_name("spu")
772}; 511};
@@ -846,7 +585,7 @@ static void spu_destroy_sysdev(struct spu *spu)
846 sysdev_unregister(&spu->sysdev); 585 sysdev_unregister(&spu->sysdev);
847} 586}
848 587
849static int __init create_spu(struct device_node *spe) 588static int __init create_spu(void *data)
850{ 589{
851 struct spu *spu; 590 struct spu *spu;
852 int ret; 591 int ret;
@@ -857,60 +596,37 @@ static int __init create_spu(struct device_node *spe)
857 if (!spu) 596 if (!spu)
858 goto out; 597 goto out;
859 598
860 spu->node = find_spu_node_id(spe); 599 spin_lock_init(&spu->register_lock);
861 if (spu->node >= MAX_NUMNODES) { 600 mutex_lock(&spu_mutex);
862 printk(KERN_WARNING "SPE %s on node %d ignored," 601 spu->number = number++;
863 " node number too big\n", spe->full_name, spu->node); 602 mutex_unlock(&spu_mutex);
864 printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n"); 603
865 return -ENODEV; 604 ret = spu_create_spu(spu, data);
866 }
867 spu->nid = of_node_to_nid(spe);
868 if (spu->nid == -1)
869 spu->nid = 0;
870 605
871 ret = spu_map_device(spu, spe);
872 /* try old method */
873 if (ret)
874 ret = spu_map_device_old(spu, spe);
875 if (ret) 606 if (ret)
876 goto out_free; 607 goto out_free;
877 608
878 ret = spu_map_interrupts(spu, spe);
879 if (ret)
880 ret = spu_map_interrupts_old(spu, spe);
881 if (ret)
882 goto out_unmap;
883 spin_lock_init(&spu->register_lock);
884 spu_mfc_sdr_setup(spu); 609 spu_mfc_sdr_setup(spu);
885 spu_mfc_sr1_set(spu, 0x33); 610 spu_mfc_sr1_set(spu, 0x33);
886 mutex_lock(&spu_mutex);
887
888 spu->number = number++;
889 ret = spu_request_irqs(spu); 611 ret = spu_request_irqs(spu);
890 if (ret) 612 if (ret)
891 goto out_unlock; 613 goto out_destroy;
892 614
893 ret = spu_create_sysdev(spu); 615 ret = spu_create_sysdev(spu);
894 if (ret) 616 if (ret)
895 goto out_free_irqs; 617 goto out_free_irqs;
896 618
619 mutex_lock(&spu_mutex);
897 list_add(&spu->list, &spu_list[spu->node]); 620 list_add(&spu->list, &spu_list[spu->node]);
898 list_add(&spu->full_list, &spu_full_list); 621 list_add(&spu->full_list, &spu_full_list);
899 spu->devnode = of_node_get(spe);
900
901 mutex_unlock(&spu_mutex); 622 mutex_unlock(&spu_mutex);
902 623
903 pr_debug(KERN_DEBUG "Using SPE %s %p %p %p %p %d\n",
904 spu->name, spu->local_store,
905 spu->problem, spu->priv1, spu->priv2, spu->number);
906 goto out; 624 goto out;
907 625
908out_free_irqs: 626out_free_irqs:
909 spu_free_irqs(spu); 627 spu_free_irqs(spu);
910out_unlock: 628out_destroy:
911 mutex_unlock(&spu_mutex); 629 spu_destroy_spu(spu);
912out_unmap:
913 spu_unmap(spu);
914out_free: 630out_free:
915 kfree(spu); 631 kfree(spu);
916out: 632out:
@@ -922,11 +638,9 @@ static void destroy_spu(struct spu *spu)
922 list_del_init(&spu->list); 638 list_del_init(&spu->list);
923 list_del_init(&spu->full_list); 639 list_del_init(&spu->full_list);
924 640
925 of_node_put(spu->devnode);
926
927 spu_destroy_sysdev(spu); 641 spu_destroy_sysdev(spu);
928 spu_free_irqs(spu); 642 spu_free_irqs(spu);
929 spu_unmap(spu); 643 spu_destroy_spu(spu);
930 kfree(spu); 644 kfree(spu);
931} 645}
932 646
@@ -947,7 +661,6 @@ module_exit(cleanup_spu_base);
947 661
948static int __init init_spu_base(void) 662static int __init init_spu_base(void)
949{ 663{
950 struct device_node *node;
951 int i, ret; 664 int i, ret;
952 665
953 /* create sysdev class for spus */ 666 /* create sysdev class for spus */
@@ -958,16 +671,13 @@ static int __init init_spu_base(void)
958 for (i = 0; i < MAX_NUMNODES; i++) 671 for (i = 0; i < MAX_NUMNODES; i++)
959 INIT_LIST_HEAD(&spu_list[i]); 672 INIT_LIST_HEAD(&spu_list[i]);
960 673
961 ret = -ENODEV; 674 ret = spu_enumerate_spus(create_spu);
962 for (node = of_find_node_by_type(NULL, "spe"); 675
963 node; node = of_find_node_by_type(node, "spe")) { 676 if (ret) {
964 ret = create_spu(node); 677 printk(KERN_WARNING "%s: Error initializing spus\n",
965 if (ret) { 678 __FUNCTION__);
966 printk(KERN_WARNING "%s: Error initializing %s\n", 679 cleanup_spu_base();
967 __FUNCTION__, node->name); 680 return ret;
968 cleanup_spu_base();
969 break;
970 }
971 } 681 }
972 682
973 xmon_register_spus(&spu_full_list); 683 xmon_register_spus(&spu_full_list);
diff --git a/arch/powerpc/platforms/cell/spu_priv1_mmio.c b/arch/powerpc/platforms/cell/spu_priv1_mmio.c
index 90011f9aab3f..a5de0430c56d 100644
--- a/arch/powerpc/platforms/cell/spu_priv1_mmio.c
+++ b/arch/powerpc/platforms/cell/spu_priv1_mmio.c
@@ -18,120 +18,498 @@
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */ 19 */
20 20
21#include <linux/interrupt.h>
22#include <linux/list.h>
21#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/ptrace.h>
25#include <linux/slab.h>
26#include <linux/wait.h>
27#include <linux/mm.h>
28#include <linux/io.h>
29#include <linux/mutex.h>
30#include <linux/device.h>
22 31
23#include <asm/io.h>
24#include <asm/spu.h> 32#include <asm/spu.h>
25#include <asm/spu_priv1.h> 33#include <asm/spu_priv1.h>
34#include <asm/firmware.h>
35#include <asm/prom.h>
26 36
27#include "interrupt.h" 37#include "interrupt.h"
38#include "spu_priv1_mmio.h"
39
40struct spu_pdata {
41 int nid;
42 struct device_node *devnode;
43 struct spu_priv1 __iomem *priv1;
44};
45
46static struct spu_pdata *spu_get_pdata(struct spu *spu)
47{
48 BUG_ON(!spu->pdata);
49 return spu->pdata;
50}
51
52struct device_node *spu_devnode(struct spu *spu)
53{
54 return spu_get_pdata(spu)->devnode;
55}
56
57EXPORT_SYMBOL_GPL(spu_devnode);
58
59static int __init find_spu_node_id(struct device_node *spe)
60{
61 const unsigned int *id;
62 struct device_node *cpu;
63 cpu = spe->parent->parent;
64 id = get_property(cpu, "node-id", NULL);
65 return id ? *id : 0;
66}
67
68static int __init cell_spuprop_present(struct spu *spu, struct device_node *spe,
69 const char *prop)
70{
71 static DEFINE_MUTEX(add_spumem_mutex);
72
73 const struct address_prop {
74 unsigned long address;
75 unsigned int len;
76 } __attribute__((packed)) *p;
77 int proplen;
78
79 unsigned long start_pfn, nr_pages;
80 struct pglist_data *pgdata;
81 struct zone *zone;
82 int ret;
83
84 p = get_property(spe, prop, &proplen);
85 WARN_ON(proplen != sizeof (*p));
86
87 start_pfn = p->address >> PAGE_SHIFT;
88 nr_pages = ((unsigned long)p->len + PAGE_SIZE - 1) >> PAGE_SHIFT;
89
90 pgdata = NODE_DATA(spu_get_pdata(spu)->nid);
91 zone = pgdata->node_zones;
92
93 /* XXX rethink locking here */
94 mutex_lock(&add_spumem_mutex);
95 ret = __add_pages(zone, start_pfn, nr_pages);
96 mutex_unlock(&add_spumem_mutex);
97
98 return ret;
99}
100
101static void __iomem * __init map_spe_prop(struct spu *spu,
102 struct device_node *n, const char *name)
103{
104 const struct address_prop {
105 unsigned long address;
106 unsigned int len;
107 } __attribute__((packed)) *prop;
108
109 const void *p;
110 int proplen;
111 void __iomem *ret = NULL;
112 int err = 0;
113
114 p = get_property(n, name, &proplen);
115 if (proplen != sizeof (struct address_prop))
116 return NULL;
117
118 prop = p;
119
120 err = cell_spuprop_present(spu, n, name);
121 if (err && (err != -EEXIST))
122 goto out;
123
124 ret = ioremap(prop->address, prop->len);
125
126 out:
127 return ret;
128}
129
130static void spu_unmap(struct spu *spu)
131{
132 iounmap(spu->priv2);
133 iounmap(spu_get_pdata(spu)->priv1);
134 iounmap(spu->problem);
135 iounmap((__force u8 __iomem *)spu->local_store);
136}
137
138static int __init spu_map_interrupts_old(struct spu *spu,
139 struct device_node *np)
140{
141 unsigned int isrc;
142 const u32 *tmp;
143
144 /* Get the interrupt source unit from the device-tree */
145 tmp = get_property(np, "isrc", NULL);
146 if (!tmp)
147 return -ENODEV;
148 isrc = tmp[0];
149
150 /* Add the node number */
151 isrc |= spu->node << IIC_IRQ_NODE_SHIFT;
152
153 /* Now map interrupts of all 3 classes */
154 spu->irqs[0] = irq_create_mapping(NULL, IIC_IRQ_CLASS_0 | isrc);
155 spu->irqs[1] = irq_create_mapping(NULL, IIC_IRQ_CLASS_1 | isrc);
156 spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc);
157
158 /* Right now, we only fail if class 2 failed */
159 return spu->irqs[2] == NO_IRQ ? -EINVAL : 0;
160}
161
162static int __init spu_map_device_old(struct spu *spu, struct device_node *node)
163{
164 const char *prop;
165 int ret;
166
167 ret = -ENODEV;
168 spu->name = get_property(node, "name", NULL);
169 if (!spu->name)
170 goto out;
171
172 prop = get_property(node, "local-store", NULL);
173 if (!prop)
174 goto out;
175 spu->local_store_phys = *(unsigned long *)prop;
176
177 /* we use local store as ram, not io memory */
178 spu->local_store = (void __force *)
179 map_spe_prop(spu, node, "local-store");
180 if (!spu->local_store)
181 goto out;
182
183 prop = get_property(node, "problem", NULL);
184 if (!prop)
185 goto out_unmap;
186 spu->problem_phys = *(unsigned long *)prop;
187
188 spu->problem= map_spe_prop(spu, node, "problem");
189 if (!spu->problem)
190 goto out_unmap;
191
192 spu_get_pdata(spu)->priv1= map_spe_prop(spu, node, "priv1");
193
194 spu->priv2= map_spe_prop(spu, node, "priv2");
195 if (!spu->priv2)
196 goto out_unmap;
197 ret = 0;
198 goto out;
199
200out_unmap:
201 spu_unmap(spu);
202out:
203 return ret;
204}
205
206static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
207{
208 struct of_irq oirq;
209 int ret;
210 int i;
211
212 for (i=0; i < 3; i++) {
213 ret = of_irq_map_one(np, i, &oirq);
214 if (ret) {
215 pr_debug("spu_new: failed to get irq %d\n", i);
216 goto err;
217 }
218 ret = -EINVAL;
219 pr_debug(" irq %d no 0x%x on %s\n", i, oirq.specifier[0],
220 oirq.controller->full_name);
221 spu->irqs[i] = irq_create_of_mapping(oirq.controller,
222 oirq.specifier, oirq.size);
223 if (spu->irqs[i] == NO_IRQ) {
224 pr_debug("spu_new: failed to map it !\n");
225 goto err;
226 }
227 }
228 return 0;
229
230err:
231 pr_debug("failed to map irq %x for spu %s\n", *oirq.specifier,
232 spu->name);
233 for (; i >= 0; i--) {
234 if (spu->irqs[i] != NO_IRQ)
235 irq_dispose_mapping(spu->irqs[i]);
236 }
237 return ret;
238}
239
240static int spu_map_resource(struct device_node *node, int nr,
241 void __iomem** virt, unsigned long *phys)
242{
243 struct resource resource = { };
244 int ret;
245
246 ret = of_address_to_resource(node, nr, &resource);
247 if (ret)
248 goto out;
249
250 if (phys)
251 *phys = resource.start;
252 *virt = ioremap(resource.start, resource.end - resource.start);
253 if (!*virt)
254 ret = -EINVAL;
255
256out:
257 return ret;
258}
259
260static int __init spu_map_device(struct spu *spu, struct device_node *node)
261{
262 int ret = -ENODEV;
263 spu->name = get_property(node, "name", NULL);
264 if (!spu->name)
265 goto out;
266
267 ret = spu_map_resource(node, 0, (void __iomem**)&spu->local_store,
268 &spu->local_store_phys);
269 if (ret) {
270 pr_debug("spu_new: failed to map %s resource 0\n",
271 node->full_name);
272 goto out;
273 }
274 ret = spu_map_resource(node, 1, (void __iomem**)&spu->problem,
275 &spu->problem_phys);
276 if (ret) {
277 pr_debug("spu_new: failed to map %s resource 1\n",
278 node->full_name);
279 goto out_unmap;
280 }
281 ret = spu_map_resource(node, 2, (void __iomem**)&spu->priv2,
282 NULL);
283 if (ret) {
284 pr_debug("spu_new: failed to map %s resource 2\n",
285 node->full_name);
286 goto out_unmap;
287 }
288 if (!firmware_has_feature(FW_FEATURE_LPAR))
289 ret = spu_map_resource(node, 3,
290 (void __iomem**)&spu_get_pdata(spu)->priv1, NULL);
291 if (ret) {
292 pr_debug("spu_new: failed to map %s resource 3\n",
293 node->full_name);
294 goto out_unmap;
295 }
296 pr_debug("spu_new: %s maps:\n", node->full_name);
297 pr_debug(" local store : 0x%016lx -> 0x%p\n",
298 spu->local_store_phys, spu->local_store);
299 pr_debug(" problem state : 0x%016lx -> 0x%p\n",
300 spu->problem_phys, spu->problem);
301 pr_debug(" priv2 : 0x%p\n", spu->priv2);
302 pr_debug(" priv1 : 0x%p\n",
303 spu_get_pdata(spu)->priv1);
304
305 return 0;
306
307out_unmap:
308 spu_unmap(spu);
309out:
310 pr_debug("failed to map spe %s: %d\n", spu->name, ret);
311 return ret;
312}
313
314static int __init of_enumerate_spus(int (*fn)(void *data))
315{
316 int ret;
317 struct device_node *node;
318
319 ret = -ENODEV;
320 for (node = of_find_node_by_type(NULL, "spe");
321 node; node = of_find_node_by_type(node, "spe")) {
322 ret = fn(node);
323 if (ret) {
324 printk(KERN_WARNING "%s: Error initializing %s\n",
325 __FUNCTION__, node->name);
326 break;
327 }
328 }
329 return ret;
330}
331
332static int __init of_create_spu(struct spu *spu, void *data)
333{
334 int ret;
335 struct device_node *spe = (struct device_node *)data;
336
337 spu->pdata = kzalloc(sizeof(struct spu_pdata),
338 GFP_KERNEL);
339 if (!spu->pdata) {
340 ret = -ENOMEM;
341 goto out;
342 }
343
344 spu->node = find_spu_node_id(spe);
345 if (spu->node >= MAX_NUMNODES) {
346 printk(KERN_WARNING "SPE %s on node %d ignored,"
347 " node number too big\n", spe->full_name, spu->node);
348 printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n");
349 ret = -ENODEV;
350 goto out_free;
351 }
352
353 spu_get_pdata(spu)->nid = of_node_to_nid(spe);
354 if (spu_get_pdata(spu)->nid == -1)
355 spu_get_pdata(spu)->nid = 0;
356
357 ret = spu_map_device(spu, spe);
358 /* try old method */
359 if (ret)
360 ret = spu_map_device_old(spu, spe);
361 if (ret)
362 goto out_free;
363
364 ret = spu_map_interrupts(spu, spe);
365 if (ret)
366 ret = spu_map_interrupts_old(spu, spe);
367 if (ret)
368 goto out_unmap;
369
370 spu_get_pdata(spu)->devnode = of_node_get(spe);
371
372 pr_debug(KERN_DEBUG "Using SPE %s %p %p %p %p %d\n", spu->name,
373 spu->local_store, spu->problem, spu_get_pdata(spu)->priv1,
374 spu->priv2, spu->number);
375 goto out;
376
377out_unmap:
378 spu_unmap(spu);
379out_free:
380 kfree(spu->pdata);
381 spu->pdata = NULL;
382out:
383 return ret;
384}
385
386static int of_destroy_spu(struct spu *spu)
387{
388 spu_unmap(spu);
389 of_node_put(spu_get_pdata(spu)->devnode);
390 kfree(spu->pdata);
391 spu->pdata = NULL;
392 return 0;
393}
394
395const struct spu_management_ops spu_management_of_ops = {
396 .enumerate_spus = of_enumerate_spus,
397 .create_spu = of_create_spu,
398 .destroy_spu = of_destroy_spu,
399};
28 400
29static void int_mask_and(struct spu *spu, int class, u64 mask) 401static void int_mask_and(struct spu *spu, int class, u64 mask)
30{ 402{
31 u64 old_mask; 403 u64 old_mask;
32 404
33 old_mask = in_be64(&spu->priv1->int_mask_RW[class]); 405 old_mask = in_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class]);
34 out_be64(&spu->priv1->int_mask_RW[class], old_mask & mask); 406 out_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class],
407 old_mask & mask);
35} 408}
36 409
37static void int_mask_or(struct spu *spu, int class, u64 mask) 410static void int_mask_or(struct spu *spu, int class, u64 mask)
38{ 411{
39 u64 old_mask; 412 u64 old_mask;
40 413
41 old_mask = in_be64(&spu->priv1->int_mask_RW[class]); 414 old_mask = in_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class]);
42 out_be64(&spu->priv1->int_mask_RW[class], old_mask | mask); 415 out_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class],
416 old_mask | mask);
43} 417}
44 418
45static void int_mask_set(struct spu *spu, int class, u64 mask) 419static void int_mask_set(struct spu *spu, int class, u64 mask)
46{ 420{
47 out_be64(&spu->priv1->int_mask_RW[class], mask); 421 out_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class], mask);
48} 422}
49 423
50static u64 int_mask_get(struct spu *spu, int class) 424static u64 int_mask_get(struct spu *spu, int class)
51{ 425{
52 return in_be64(&spu->priv1->int_mask_RW[class]); 426 return in_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class]);
53} 427}
54 428
55static void int_stat_clear(struct spu *spu, int class, u64 stat) 429static void int_stat_clear(struct spu *spu, int class, u64 stat)
56{ 430{
57 out_be64(&spu->priv1->int_stat_RW[class], stat); 431 out_be64(&spu_get_pdata(spu)->priv1->int_stat_RW[class], stat);
58} 432}
59 433
60static u64 int_stat_get(struct spu *spu, int class) 434static u64 int_stat_get(struct spu *spu, int class)
61{ 435{
62 return in_be64(&spu->priv1->int_stat_RW[class]); 436 return in_be64(&spu_get_pdata(spu)->priv1->int_stat_RW[class]);
63} 437}
64 438
65static void cpu_affinity_set(struct spu *spu, int cpu) 439static void cpu_affinity_set(struct spu *spu, int cpu)
66{ 440{
67 u64 target = iic_get_target_id(cpu); 441 u64 target = iic_get_target_id(cpu);
68 u64 route = target << 48 | target << 32 | target << 16; 442 u64 route = target << 48 | target << 32 | target << 16;
69 out_be64(&spu->priv1->int_route_RW, route); 443 out_be64(&spu_get_pdata(spu)->priv1->int_route_RW, route);
70} 444}
71 445
72static u64 mfc_dar_get(struct spu *spu) 446static u64 mfc_dar_get(struct spu *spu)
73{ 447{
74 return in_be64(&spu->priv1->mfc_dar_RW); 448 return in_be64(&spu_get_pdata(spu)->priv1->mfc_dar_RW);
75} 449}
76 450
77static u64 mfc_dsisr_get(struct spu *spu) 451static u64 mfc_dsisr_get(struct spu *spu)
78{ 452{
79 return in_be64(&spu->priv1->mfc_dsisr_RW); 453 return in_be64(&spu_get_pdata(spu)->priv1->mfc_dsisr_RW);
80} 454}
81 455
82static void mfc_dsisr_set(struct spu *spu, u64 dsisr) 456static void mfc_dsisr_set(struct spu *spu, u64 dsisr)
83{ 457{
84 out_be64(&spu->priv1->mfc_dsisr_RW, dsisr); 458 out_be64(&spu_get_pdata(spu)->priv1->mfc_dsisr_RW, dsisr);
85} 459}
86 460
87static void mfc_sdr_setup(struct spu *spu) 461static void mfc_sdr_setup(struct spu *spu)
88{ 462{
89 out_be64(&spu->priv1->mfc_sdr_RW, mfspr(SPRN_SDR1)); 463 out_be64(&spu_get_pdata(spu)->priv1->mfc_sdr_RW, mfspr(SPRN_SDR1));
90} 464}
91 465
92static void mfc_sr1_set(struct spu *spu, u64 sr1) 466static void mfc_sr1_set(struct spu *spu, u64 sr1)
93{ 467{
94 out_be64(&spu->priv1->mfc_sr1_RW, sr1); 468 out_be64(&spu_get_pdata(spu)->priv1->mfc_sr1_RW, sr1);
95} 469}
96 470
97static u64 mfc_sr1_get(struct spu *spu) 471static u64 mfc_sr1_get(struct spu *spu)
98{ 472{
99 return in_be64(&spu->priv1->mfc_sr1_RW); 473 return in_be64(&spu_get_pdata(spu)->priv1->mfc_sr1_RW);
100} 474}
101 475
102static void mfc_tclass_id_set(struct spu *spu, u64 tclass_id) 476static void mfc_tclass_id_set(struct spu *spu, u64 tclass_id)
103{ 477{
104 out_be64(&spu->priv1->mfc_tclass_id_RW, tclass_id); 478 out_be64(&spu_get_pdata(spu)->priv1->mfc_tclass_id_RW, tclass_id);
105} 479}
106 480
107static u64 mfc_tclass_id_get(struct spu *spu) 481static u64 mfc_tclass_id_get(struct spu *spu)
108{ 482{
109 return in_be64(&spu->priv1->mfc_tclass_id_RW); 483 return in_be64(&spu_get_pdata(spu)->priv1->mfc_tclass_id_RW);
110} 484}
111 485
112static void tlb_invalidate(struct spu *spu) 486static void tlb_invalidate(struct spu *spu)
113{ 487{
114 out_be64(&spu->priv1->tlb_invalidate_entry_W, 0ul); 488 out_be64(&spu_get_pdata(spu)->priv1->tlb_invalidate_entry_W, 0ul);
115} 489}
116 490
117static void resource_allocation_groupID_set(struct spu *spu, u64 id) 491static void resource_allocation_groupID_set(struct spu *spu, u64 id)
118{ 492{
119 out_be64(&spu->priv1->resource_allocation_groupID_RW, id); 493 out_be64(&spu_get_pdata(spu)->priv1->resource_allocation_groupID_RW,
494 id);
120} 495}
121 496
122static u64 resource_allocation_groupID_get(struct spu *spu) 497static u64 resource_allocation_groupID_get(struct spu *spu)
123{ 498{
124 return in_be64(&spu->priv1->resource_allocation_groupID_RW); 499 return in_be64(
500 &spu_get_pdata(spu)->priv1->resource_allocation_groupID_RW);
125} 501}
126 502
127static void resource_allocation_enable_set(struct spu *spu, u64 enable) 503static void resource_allocation_enable_set(struct spu *spu, u64 enable)
128{ 504{
129 out_be64(&spu->priv1->resource_allocation_enable_RW, enable); 505 out_be64(&spu_get_pdata(spu)->priv1->resource_allocation_enable_RW,
506 enable);
130} 507}
131 508
132static u64 resource_allocation_enable_get(struct spu *spu) 509static u64 resource_allocation_enable_get(struct spu *spu)
133{ 510{
134 return in_be64(&spu->priv1->resource_allocation_enable_RW); 511 return in_be64(
512 &spu_get_pdata(spu)->priv1->resource_allocation_enable_RW);
135} 513}
136 514
137const struct spu_priv1_ops spu_priv1_mmio_ops = 515const struct spu_priv1_ops spu_priv1_mmio_ops =
diff --git a/arch/powerpc/platforms/cell/spu_priv1_mmio.h b/arch/powerpc/platforms/cell/spu_priv1_mmio.h
new file mode 100644
index 000000000000..7b62bd1cc256
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spu_priv1_mmio.h
@@ -0,0 +1,26 @@
1/*
2 * spu hypervisor abstraction for direct hardware access.
3 *
4 * Copyright (C) 2006 Sony Computer Entertainment Inc.
5 * Copyright 2006 Sony Corp.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#ifndef SPU_PRIV1_MMIO_H
22#define SPU_PRIV1_MMIO_H
23
24struct device_node *spu_devnode(struct spu *spu);
25
26#endif /* SPU_PRIV1_MMIO_H */
diff --git a/include/asm-powerpc/spu.h b/include/asm-powerpc/spu.h
index f968f8697538..fdad4267b447 100644
--- a/include/asm-powerpc/spu.h
+++ b/include/asm-powerpc/spu.h
@@ -111,13 +111,11 @@ struct spu {
111 u8 *local_store; 111 u8 *local_store;
112 unsigned long problem_phys; 112 unsigned long problem_phys;
113 struct spu_problem __iomem *problem; 113 struct spu_problem __iomem *problem;
114 struct spu_priv1 __iomem *priv1;
115 struct spu_priv2 __iomem *priv2; 114 struct spu_priv2 __iomem *priv2;
116 struct list_head list; 115 struct list_head list;
117 struct list_head sched_list; 116 struct list_head sched_list;
118 struct list_head full_list; 117 struct list_head full_list;
119 int number; 118 int number;
120 int nid;
121 unsigned int irqs[3]; 119 unsigned int irqs[3];
122 u32 node; 120 u32 node;
123 u64 flags; 121 u64 flags;
@@ -144,8 +142,7 @@ struct spu {
144 char irq_c1[8]; 142 char irq_c1[8];
145 char irq_c2[8]; 143 char irq_c2[8];
146 144
147 struct device_node *devnode; 145 void* pdata; /* platform private data */
148
149 struct sys_device sysdev; 146 struct sys_device sysdev;
150}; 147};
151 148
diff --git a/include/asm-powerpc/spu_priv1.h b/include/asm-powerpc/spu_priv1.h
index 4f9a04db99f7..69dcb0c53884 100644
--- a/include/asm-powerpc/spu_priv1.h
+++ b/include/asm-powerpc/spu_priv1.h
@@ -21,12 +21,13 @@
21#define _SPU_PRIV1_H 21#define _SPU_PRIV1_H
22#if defined(__KERNEL__) 22#if defined(__KERNEL__)
23 23
24#include <linux/types.h>
25
24struct spu; 26struct spu;
25 27
26/* access to priv1 registers */ 28/* access to priv1 registers */
27 29
28struct spu_priv1_ops 30struct spu_priv1_ops {
29{
30 void (*int_mask_and) (struct spu *spu, int class, u64 mask); 31 void (*int_mask_and) (struct spu *spu, int class, u64 mask);
31 void (*int_mask_or) (struct spu *spu, int class, u64 mask); 32 void (*int_mask_or) (struct spu *spu, int class, u64 mask);
32 void (*int_mask_set) (struct spu *spu, int class, u64 mask); 33 void (*int_mask_set) (struct spu *spu, int class, u64 mask);
@@ -171,12 +172,41 @@ spu_resource_allocation_enable_get (struct spu *spu)
171 return spu_priv1_ops->resource_allocation_enable_get(spu); 172 return spu_priv1_ops->resource_allocation_enable_get(spu);
172} 173}
173 174
174/* The declarations folowing are put here for convenience 175/* spu management abstraction */
175 * and only intended to be used by the platform setup code 176
176 * for initializing spu_priv1_ops. 177struct spu_management_ops {
178 int (*enumerate_spus)(int (*fn)(void *data));
179 int (*create_spu)(struct spu *spu, void *data);
180 int (*destroy_spu)(struct spu *spu);
181};
182
183extern const struct spu_management_ops* spu_management_ops;
184
185static inline int
186spu_enumerate_spus (int (*fn)(void *data))
187{
188 return spu_management_ops->enumerate_spus(fn);
189}
190
191static inline int
192spu_create_spu (struct spu *spu, void *data)
193{
194 return spu_management_ops->create_spu(spu, data);
195}
196
197static inline int
198spu_destroy_spu (struct spu *spu)
199{
200 return spu_management_ops->destroy_spu(spu);
201}
202
203/*
204 * The declarations folowing are put here for convenience
205 * and only intended to be used by the platform setup code.
177 */ 206 */
178 207
179extern const struct spu_priv1_ops spu_priv1_mmio_ops; 208extern const struct spu_priv1_ops spu_priv1_mmio_ops;
209extern const struct spu_management_ops spu_management_of_ops;
180 210
181#endif /* __KERNEL__ */ 211#endif /* __KERNEL__ */
182#endif 212#endif