aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/platforms/cell/spu_base.c
diff options
context:
space:
mode:
authorGeoff Levand <geoffrey.levand@am.sony.com>2006-11-22 18:46:49 -0500
committerPaul Mackerras <paulus@samba.org>2006-12-04 04:40:39 -0500
commite28b003136b5b2f10c25b49c32df9b7742550c23 (patch)
tree86d629c9dc08567c5431b07883c1e860da550df7 /arch/powerpc/platforms/cell/spu_base.c
parente34226d2cd443a67f46fc531e3a6bc6e03843ce2 (diff)
[POWERPC] cell: abstract spu management routines
This adds a platform specific spu management abstraction and the corresponding routines to support the IBM Cell Blade. It also removes the hypervisor-only resources that were included in struct spu. Three new platform specific routines are introduced, spu_enumerate_spus(), spu_create_spu() and spu_destroy_spu(). The underlying design uses a new type, struct spu_management_ops, to hold function pointers that the platform setup code is expected to initialize to instances appropriate to that platform. For the IBM Cell Blade support, I put the hypervisor-only resources that were in struct spu into a platform specific data structure struct spu_pdata. Signed-off-by: Geoff Levand <geoffrey.levand@am.sony.com> Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Diffstat (limited to 'arch/powerpc/platforms/cell/spu_base.c')
-rw-r--r--arch/powerpc/platforms/cell/spu_base.c334
1 files changed, 22 insertions, 312 deletions
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index d4f4f396288f..841ed359802c 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -25,23 +25,17 @@
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/list.h> 26#include <linux/list.h>
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/pci.h>
29#include <linux/poll.h>
30#include <linux/ptrace.h> 28#include <linux/ptrace.h>
31#include <linux/slab.h> 29#include <linux/slab.h>
32#include <linux/wait.h> 30#include <linux/wait.h>
33 31#include <linux/mm.h>
34#include <asm/firmware.h> 32#include <linux/io.h>
35#include <asm/io.h>
36#include <asm/prom.h>
37#include <linux/mutex.h> 33#include <linux/mutex.h>
38#include <asm/spu.h> 34#include <asm/spu.h>
39#include <asm/spu_priv1.h> 35#include <asm/spu_priv1.h>
40#include <asm/mmu_context.h>
41#include <asm/xmon.h> 36#include <asm/xmon.h>
42 37
43#include "interrupt.h" 38const struct spu_management_ops *spu_management_ops;
44
45const struct spu_priv1_ops *spu_priv1_ops; 39const struct spu_priv1_ops *spu_priv1_ops;
46 40
47EXPORT_SYMBOL_GPL(spu_priv1_ops); 41EXPORT_SYMBOL_GPL(spu_priv1_ops);
@@ -512,261 +506,6 @@ int spu_irq_class_1_bottom(struct spu *spu)
512 return ret; 506 return ret;
513} 507}
514 508
515static int __init find_spu_node_id(struct device_node *spe)
516{
517 const unsigned int *id;
518 struct device_node *cpu;
519 cpu = spe->parent->parent;
520 id = get_property(cpu, "node-id", NULL);
521 return id ? *id : 0;
522}
523
524static int __init cell_spuprop_present(struct spu *spu, struct device_node *spe,
525 const char *prop)
526{
527 static DEFINE_MUTEX(add_spumem_mutex);
528
529 const struct address_prop {
530 unsigned long address;
531 unsigned int len;
532 } __attribute__((packed)) *p;
533 int proplen;
534
535 unsigned long start_pfn, nr_pages;
536 struct pglist_data *pgdata;
537 struct zone *zone;
538 int ret;
539
540 p = get_property(spe, prop, &proplen);
541 WARN_ON(proplen != sizeof (*p));
542
543 start_pfn = p->address >> PAGE_SHIFT;
544 nr_pages = ((unsigned long)p->len + PAGE_SIZE - 1) >> PAGE_SHIFT;
545
546 pgdata = NODE_DATA(spu->nid);
547 zone = pgdata->node_zones;
548
549 /* XXX rethink locking here */
550 mutex_lock(&add_spumem_mutex);
551 ret = __add_pages(zone, start_pfn, nr_pages);
552 mutex_unlock(&add_spumem_mutex);
553
554 return ret;
555}
556
557static void __iomem * __init map_spe_prop(struct spu *spu,
558 struct device_node *n, const char *name)
559{
560 const struct address_prop {
561 unsigned long address;
562 unsigned int len;
563 } __attribute__((packed)) *prop;
564
565 const void *p;
566 int proplen;
567 void __iomem *ret = NULL;
568 int err = 0;
569
570 p = get_property(n, name, &proplen);
571 if (proplen != sizeof (struct address_prop))
572 return NULL;
573
574 prop = p;
575
576 err = cell_spuprop_present(spu, n, name);
577 if (err && (err != -EEXIST))
578 goto out;
579
580 ret = ioremap(prop->address, prop->len);
581
582 out:
583 return ret;
584}
585
586static void spu_unmap(struct spu *spu)
587{
588 iounmap(spu->priv2);
589 iounmap(spu->priv1);
590 iounmap(spu->problem);
591 iounmap((__force u8 __iomem *)spu->local_store);
592}
593
594/* This function shall be abstracted for HV platforms */
595static int __init spu_map_interrupts_old(struct spu *spu, struct device_node *np)
596{
597 unsigned int isrc;
598 const u32 *tmp;
599
600 /* Get the interrupt source unit from the device-tree */
601 tmp = get_property(np, "isrc", NULL);
602 if (!tmp)
603 return -ENODEV;
604 isrc = tmp[0];
605
606 /* Add the node number */
607 isrc |= spu->node << IIC_IRQ_NODE_SHIFT;
608
609 /* Now map interrupts of all 3 classes */
610 spu->irqs[0] = irq_create_mapping(NULL, IIC_IRQ_CLASS_0 | isrc);
611 spu->irqs[1] = irq_create_mapping(NULL, IIC_IRQ_CLASS_1 | isrc);
612 spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc);
613
614 /* Right now, we only fail if class 2 failed */
615 return spu->irqs[2] == NO_IRQ ? -EINVAL : 0;
616}
617
618static int __init spu_map_device_old(struct spu *spu, struct device_node *node)
619{
620 const char *prop;
621 int ret;
622
623 ret = -ENODEV;
624 spu->name = get_property(node, "name", NULL);
625 if (!spu->name)
626 goto out;
627
628 prop = get_property(node, "local-store", NULL);
629 if (!prop)
630 goto out;
631 spu->local_store_phys = *(unsigned long *)prop;
632
633 /* we use local store as ram, not io memory */
634 spu->local_store = (void __force *)
635 map_spe_prop(spu, node, "local-store");
636 if (!spu->local_store)
637 goto out;
638
639 prop = get_property(node, "problem", NULL);
640 if (!prop)
641 goto out_unmap;
642 spu->problem_phys = *(unsigned long *)prop;
643
644 spu->problem= map_spe_prop(spu, node, "problem");
645 if (!spu->problem)
646 goto out_unmap;
647
648 spu->priv1= map_spe_prop(spu, node, "priv1");
649 /* priv1 is not available on a hypervisor */
650
651 spu->priv2= map_spe_prop(spu, node, "priv2");
652 if (!spu->priv2)
653 goto out_unmap;
654 ret = 0;
655 goto out;
656
657out_unmap:
658 spu_unmap(spu);
659out:
660 return ret;
661}
662
663static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
664{
665 struct of_irq oirq;
666 int ret;
667 int i;
668
669 for (i=0; i < 3; i++) {
670 ret = of_irq_map_one(np, i, &oirq);
671 if (ret) {
672 pr_debug("spu_new: failed to get irq %d\n", i);
673 goto err;
674 }
675 ret = -EINVAL;
676 pr_debug(" irq %d no 0x%x on %s\n", i, oirq.specifier[0],
677 oirq.controller->full_name);
678 spu->irqs[i] = irq_create_of_mapping(oirq.controller,
679 oirq.specifier, oirq.size);
680 if (spu->irqs[i] == NO_IRQ) {
681 pr_debug("spu_new: failed to map it !\n");
682 goto err;
683 }
684 }
685 return 0;
686
687err:
688 pr_debug("failed to map irq %x for spu %s\n", *oirq.specifier, spu->name);
689 for (; i >= 0; i--) {
690 if (spu->irqs[i] != NO_IRQ)
691 irq_dispose_mapping(spu->irqs[i]);
692 }
693 return ret;
694}
695
696static int spu_map_resource(struct device_node *node, int nr,
697 void __iomem** virt, unsigned long *phys)
698{
699 struct resource resource = { };
700 int ret;
701
702 ret = of_address_to_resource(node, nr, &resource);
703 if (ret)
704 goto out;
705
706 if (phys)
707 *phys = resource.start;
708 *virt = ioremap(resource.start, resource.end - resource.start);
709 if (!*virt)
710 ret = -EINVAL;
711
712out:
713 return ret;
714}
715
716static int __init spu_map_device(struct spu *spu, struct device_node *node)
717{
718 int ret = -ENODEV;
719 spu->name = get_property(node, "name", NULL);
720 if (!spu->name)
721 goto out;
722
723 ret = spu_map_resource(node, 0, (void __iomem**)&spu->local_store,
724 &spu->local_store_phys);
725 if (ret) {
726 pr_debug("spu_new: failed to map %s resource 0\n",
727 node->full_name);
728 goto out;
729 }
730 ret = spu_map_resource(node, 1, (void __iomem**)&spu->problem,
731 &spu->problem_phys);
732 if (ret) {
733 pr_debug("spu_new: failed to map %s resource 1\n",
734 node->full_name);
735 goto out_unmap;
736 }
737 ret = spu_map_resource(node, 2, (void __iomem**)&spu->priv2,
738 NULL);
739 if (ret) {
740 pr_debug("spu_new: failed to map %s resource 2\n",
741 node->full_name);
742 goto out_unmap;
743 }
744
745 if (!firmware_has_feature(FW_FEATURE_LPAR))
746 ret = spu_map_resource(node, 3, (void __iomem**)&spu->priv1,
747 NULL);
748 if (ret) {
749 pr_debug("spu_new: failed to map %s resource 3\n",
750 node->full_name);
751 goto out_unmap;
752 }
753 pr_debug("spu_new: %s maps:\n", node->full_name);
754 pr_debug(" local store : 0x%016lx -> 0x%p\n",
755 spu->local_store_phys, spu->local_store);
756 pr_debug(" problem state : 0x%016lx -> 0x%p\n",
757 spu->problem_phys, spu->problem);
758 pr_debug(" priv2 : 0x%p\n", spu->priv2);
759 pr_debug(" priv1 : 0x%p\n", spu->priv1);
760
761 return 0;
762
763out_unmap:
764 spu_unmap(spu);
765out:
766 pr_debug("failed to map spe %s: %d\n", spu->name, ret);
767 return ret;
768}
769
770struct sysdev_class spu_sysdev_class = { 509struct sysdev_class spu_sysdev_class = {
771 set_kset_name("spu") 510 set_kset_name("spu")
772}; 511};
@@ -846,7 +585,7 @@ static void spu_destroy_sysdev(struct spu *spu)
846 sysdev_unregister(&spu->sysdev); 585 sysdev_unregister(&spu->sysdev);
847} 586}
848 587
849static int __init create_spu(struct device_node *spe) 588static int __init create_spu(void *data)
850{ 589{
851 struct spu *spu; 590 struct spu *spu;
852 int ret; 591 int ret;
@@ -857,60 +596,37 @@ static int __init create_spu(struct device_node *spe)
857 if (!spu) 596 if (!spu)
858 goto out; 597 goto out;
859 598
860 spu->node = find_spu_node_id(spe); 599 spin_lock_init(&spu->register_lock);
861 if (spu->node >= MAX_NUMNODES) { 600 mutex_lock(&spu_mutex);
862 printk(KERN_WARNING "SPE %s on node %d ignored," 601 spu->number = number++;
863 " node number too big\n", spe->full_name, spu->node); 602 mutex_unlock(&spu_mutex);
864 printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n"); 603
865 return -ENODEV; 604 ret = spu_create_spu(spu, data);
866 }
867 spu->nid = of_node_to_nid(spe);
868 if (spu->nid == -1)
869 spu->nid = 0;
870 605
871 ret = spu_map_device(spu, spe);
872 /* try old method */
873 if (ret)
874 ret = spu_map_device_old(spu, spe);
875 if (ret) 606 if (ret)
876 goto out_free; 607 goto out_free;
877 608
878 ret = spu_map_interrupts(spu, spe);
879 if (ret)
880 ret = spu_map_interrupts_old(spu, spe);
881 if (ret)
882 goto out_unmap;
883 spin_lock_init(&spu->register_lock);
884 spu_mfc_sdr_setup(spu); 609 spu_mfc_sdr_setup(spu);
885 spu_mfc_sr1_set(spu, 0x33); 610 spu_mfc_sr1_set(spu, 0x33);
886 mutex_lock(&spu_mutex);
887
888 spu->number = number++;
889 ret = spu_request_irqs(spu); 611 ret = spu_request_irqs(spu);
890 if (ret) 612 if (ret)
891 goto out_unlock; 613 goto out_destroy;
892 614
893 ret = spu_create_sysdev(spu); 615 ret = spu_create_sysdev(spu);
894 if (ret) 616 if (ret)
895 goto out_free_irqs; 617 goto out_free_irqs;
896 618
619 mutex_lock(&spu_mutex);
897 list_add(&spu->list, &spu_list[spu->node]); 620 list_add(&spu->list, &spu_list[spu->node]);
898 list_add(&spu->full_list, &spu_full_list); 621 list_add(&spu->full_list, &spu_full_list);
899 spu->devnode = of_node_get(spe);
900
901 mutex_unlock(&spu_mutex); 622 mutex_unlock(&spu_mutex);
902 623
903 pr_debug(KERN_DEBUG "Using SPE %s %p %p %p %p %d\n",
904 spu->name, spu->local_store,
905 spu->problem, spu->priv1, spu->priv2, spu->number);
906 goto out; 624 goto out;
907 625
908out_free_irqs: 626out_free_irqs:
909 spu_free_irqs(spu); 627 spu_free_irqs(spu);
910out_unlock: 628out_destroy:
911 mutex_unlock(&spu_mutex); 629 spu_destroy_spu(spu);
912out_unmap:
913 spu_unmap(spu);
914out_free: 630out_free:
915 kfree(spu); 631 kfree(spu);
916out: 632out:
@@ -922,11 +638,9 @@ static void destroy_spu(struct spu *spu)
922 list_del_init(&spu->list); 638 list_del_init(&spu->list);
923 list_del_init(&spu->full_list); 639 list_del_init(&spu->full_list);
924 640
925 of_node_put(spu->devnode);
926
927 spu_destroy_sysdev(spu); 641 spu_destroy_sysdev(spu);
928 spu_free_irqs(spu); 642 spu_free_irqs(spu);
929 spu_unmap(spu); 643 spu_destroy_spu(spu);
930 kfree(spu); 644 kfree(spu);
931} 645}
932 646
@@ -947,7 +661,6 @@ module_exit(cleanup_spu_base);
947 661
948static int __init init_spu_base(void) 662static int __init init_spu_base(void)
949{ 663{
950 struct device_node *node;
951 int i, ret; 664 int i, ret;
952 665
953 /* create sysdev class for spus */ 666 /* create sysdev class for spus */
@@ -958,16 +671,13 @@ static int __init init_spu_base(void)
958 for (i = 0; i < MAX_NUMNODES; i++) 671 for (i = 0; i < MAX_NUMNODES; i++)
959 INIT_LIST_HEAD(&spu_list[i]); 672 INIT_LIST_HEAD(&spu_list[i]);
960 673
961 ret = -ENODEV; 674 ret = spu_enumerate_spus(create_spu);
962 for (node = of_find_node_by_type(NULL, "spe"); 675
963 node; node = of_find_node_by_type(node, "spe")) { 676 if (ret) {
964 ret = create_spu(node); 677 printk(KERN_WARNING "%s: Error initializing spus\n",
965 if (ret) { 678 __FUNCTION__);
966 printk(KERN_WARNING "%s: Error initializing %s\n", 679 cleanup_spu_base();
967 __FUNCTION__, node->name); 680 return ret;
968 cleanup_spu_base();
969 break;
970 }
971 } 681 }
972 682
973 xmon_register_spus(&spu_full_list); 683 xmon_register_spus(&spu_full_list);