Diffstat (limited to 'arch/sparc64/kernel/pci_sun4v.c')
-rw-r--r--	arch/sparc64/kernel/pci_sun4v.c	| 402
1 file changed, 56 insertions(+), 346 deletions(-)
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index ec22cd61ec8c..94295c219329 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -1,6 +1,6 @@
 /* pci_sun4v.c: SUN4V specific PCI controller support.
  *
- * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
  */
 
 #include <linux/kernel.h>
@@ -29,7 +29,7 @@
 
 #define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))
 
-struct pci_iommu_batch {
+struct iommu_batch {
 	struct pci_dev	*pdev;		/* Device mapping is for.	*/
 	unsigned long	prot;		/* IOMMU page protections	*/
 	unsigned long	entry;		/* Index into IOTSB.		*/
@@ -37,12 +37,12 @@ struct pci_iommu_batch {
 	unsigned long	npages;		/* Number of pages in list.	*/
 };
 
-static DEFINE_PER_CPU(struct pci_iommu_batch, pci_iommu_batch);
+static DEFINE_PER_CPU(struct iommu_batch, pci_iommu_batch);
 
 /* Interrupts must be disabled.  */
 static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long prot, unsigned long entry)
 {
-	struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
+	struct iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
 
 	p->pdev = pdev;
 	p->prot = prot;
@@ -51,10 +51,10 @@ static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long pro
 }
 
 /* Interrupts must be disabled.  */
-static long pci_iommu_batch_flush(struct pci_iommu_batch *p)
+static long pci_iommu_batch_flush(struct iommu_batch *p)
 {
-	struct pcidev_cookie *pcp = p->pdev->sysdata;
-	unsigned long devhandle = pcp->pbm->devhandle;
+	struct pci_pbm_info *pbm = p->pdev->dev.archdata.host_controller;
+	unsigned long devhandle = pbm->devhandle;
 	unsigned long prot = p->prot;
 	unsigned long entry = p->entry;
 	u64 *pglist = p->pglist;
@@ -89,7 +89,7 @@ static long pci_iommu_batch_flush(struct pci_iommu_batch *p)
 /* Interrupts must be disabled.  */
 static inline long pci_iommu_batch_add(u64 phys_page)
 {
-	struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
+	struct iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
 
 	BUG_ON(p->npages >= PGLIST_NENTS);
 
@@ -103,14 +103,14 @@ static inline long pci_iommu_batch_add(u64 phys_page)
 /* Interrupts must be disabled.  */
 static inline long pci_iommu_batch_end(void)
 {
-	struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
+	struct iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
 
 	BUG_ON(p->npages >= PGLIST_NENTS);
 
 	return pci_iommu_batch_flush(p);
 }
 
-static long pci_arena_alloc(struct pci_iommu_arena *arena, unsigned long npages)
+static long pci_arena_alloc(struct iommu_arena *arena, unsigned long npages)
 {
 	unsigned long n, i, start, end, limit;
 	int pass;
@@ -149,7 +149,7 @@ again:
 	return n;
 }
 
-static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages)
+static void pci_arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages)
 {
 	unsigned long i;
 
@@ -159,8 +159,7 @@ static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, un
 
 static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp)
 {
-	struct pcidev_cookie *pcp;
-	struct pci_iommu *iommu;
+	struct iommu *iommu;
 	unsigned long flags, order, first_page, npages, n;
 	void *ret;
 	long entry;
@@ -178,8 +177,7 @@ static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr
 
 	memset((char *)first_page, 0, PAGE_SIZE << order);
 
-	pcp = pdev->sysdata;
-	iommu = pcp->pbm->iommu;
+	iommu = pdev->dev.archdata.iommu;
 
 	spin_lock_irqsave(&iommu->lock, flags);
 	entry = pci_arena_alloc(&iommu->arena, npages);
@@ -226,15 +224,15 @@ arena_alloc_fail:
 
 static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
 {
-	struct pcidev_cookie *pcp;
-	struct pci_iommu *iommu;
+	struct pci_pbm_info *pbm;
+	struct iommu *iommu;
 	unsigned long flags, order, npages, entry;
 	u32 devhandle;
 
 	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
-	pcp = pdev->sysdata;
-	iommu = pcp->pbm->iommu;
-	devhandle = pcp->pbm->devhandle;
+	iommu = pdev->dev.archdata.iommu;
+	pbm = pdev->dev.archdata.host_controller;
+	devhandle = pbm->devhandle;
 	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
 
 	spin_lock_irqsave(&iommu->lock, flags);
@@ -259,16 +257,14 @@ static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu,
 
 static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
 {
-	struct pcidev_cookie *pcp;
-	struct pci_iommu *iommu;
+	struct iommu *iommu;
 	unsigned long flags, npages, oaddr;
 	unsigned long i, base_paddr;
 	u32 bus_addr, ret;
 	unsigned long prot;
 	long entry;
 
-	pcp = pdev->sysdata;
-	iommu = pcp->pbm->iommu;
+	iommu = pdev->dev.archdata.iommu;
 
 	if (unlikely(direction == PCI_DMA_NONE))
 		goto bad;
@@ -324,8 +320,8 @@ iommu_map_fail:
 
 static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
 {
-	struct pcidev_cookie *pcp;
-	struct pci_iommu *iommu;
+	struct pci_pbm_info *pbm;
+	struct iommu *iommu;
 	unsigned long flags, npages;
 	long entry;
 	u32 devhandle;
@@ -336,9 +332,9 @@ static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_
 		return;
 	}
 
-	pcp = pdev->sysdata;
-	iommu = pcp->pbm->iommu;
-	devhandle = pcp->pbm->devhandle;
+	iommu = pdev->dev.archdata.iommu;
+	pbm = pdev->dev.archdata.host_controller;
+	devhandle = pbm->devhandle;
 
 	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
 	npages >>= IO_PAGE_SHIFT;
@@ -460,8 +456,7 @@ iommu_map_failed:
 
 static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
 {
-	struct pcidev_cookie *pcp;
-	struct pci_iommu *iommu;
+	struct iommu *iommu;
 	unsigned long flags, npages, prot;
 	u32 dma_base;
 	struct scatterlist *sgtmp;
@@ -480,8 +475,7 @@ static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int n
 		return 1;
 	}
 
-	pcp = pdev->sysdata;
-	iommu = pcp->pbm->iommu;
+	iommu = pdev->dev.archdata.iommu;
 
 	if (unlikely(direction == PCI_DMA_NONE))
 		goto bad;
@@ -537,8 +531,8 @@ iommu_map_failed:
 
 static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
 {
-	struct pcidev_cookie *pcp;
-	struct pci_iommu *iommu;
+	struct pci_pbm_info *pbm;
+	struct iommu *iommu;
 	unsigned long flags, i, npages;
 	long entry;
 	u32 devhandle, bus_addr;
@@ -548,9 +542,9 @@ static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, in
 		WARN_ON(1);
 	}
 
-	pcp = pdev->sysdata;
-	iommu = pcp->pbm->iommu;
-	devhandle = pcp->pbm->devhandle;
+	iommu = pdev->dev.archdata.iommu;
+	pbm = pdev->dev.archdata.host_controller;
+	devhandle = pbm->devhandle;
 
 	bus_addr = sglist->dma_address & IO_PAGE_MASK;
 
@@ -589,7 +583,7 @@ static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist
 	/* Nothing to do... */
 }
 
-struct pci_iommu_ops pci_sun4v_iommu_ops = {
+const struct pci_iommu_ops pci_sun4v_iommu_ops = {
 	.alloc_consistent = pci_4v_alloc_consistent,
 	.free_consistent = pci_4v_free_consistent,
 	.map_single = pci_4v_map_single,
@@ -600,132 +594,12 @@ struct pci_iommu_ops pci_sun4v_iommu_ops = {
 	.dma_sync_sg_for_cpu = pci_4v_dma_sync_sg_for_cpu,
 };
 
-/* SUN4V PCI configuration space accessors. */
-
-struct pdev_entry {
-	struct pdev_entry	*next;
-	u32			devhandle;
-	unsigned int		bus;
-	unsigned int		device;
-	unsigned int		func;
-};
-
-#define PDEV_HTAB_SIZE	16
-#define PDEV_HTAB_MASK	(PDEV_HTAB_SIZE - 1)
-static struct pdev_entry *pdev_htab[PDEV_HTAB_SIZE];
-
-static inline unsigned int pdev_hashfn(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
-{
-	unsigned int val;
-
-	val = (devhandle ^ (devhandle >> 4));
-	val ^= bus;
-	val ^= device;
-	val ^= func;
-
-	return val & PDEV_HTAB_MASK;
-}
-
-static int pdev_htab_add(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
-{
-	struct pdev_entry *p = kmalloc(sizeof(*p), GFP_KERNEL);
-	struct pdev_entry **slot;
-
-	if (!p)
-		return -ENOMEM;
-
-	slot = &pdev_htab[pdev_hashfn(devhandle, bus, device, func)];
-	p->next = *slot;
-	*slot = p;
-
-	p->devhandle = devhandle;
-	p->bus = bus;
-	p->device = device;
-	p->func = func;
-
-	return 0;
-}
-
-/* Recursively descend into the OBP device tree, rooted at toplevel_node,
- * looking for a PCI device matching bus and devfn.
- */
-static int obp_find(struct device_node *toplevel_node, unsigned int bus, unsigned int devfn)
-{
-	toplevel_node = toplevel_node->child;
-
-	while (toplevel_node != NULL) {
-		struct linux_prom_pci_registers *regs;
-		struct property *prop;
-		int ret;
-
-		ret = obp_find(toplevel_node, bus, devfn);
-		if (ret != 0)
-			return ret;
-
-		prop = of_find_property(toplevel_node, "reg", NULL);
-		if (!prop)
-			goto next_sibling;
-
-		regs = prop->value;
-		if (((regs->phys_hi >> 16) & 0xff) == bus &&
-		    ((regs->phys_hi >> 8) & 0xff) == devfn)
-			break;
-
-	next_sibling:
-		toplevel_node = toplevel_node->sibling;
-	}
-
-	return toplevel_node != NULL;
-}
-
-static int pdev_htab_populate(struct pci_pbm_info *pbm)
-{
-	u32 devhandle = pbm->devhandle;
-	unsigned int bus;
-
-	for (bus = pbm->pci_first_busno; bus <= pbm->pci_last_busno; bus++) {
-		unsigned int devfn;
-
-		for (devfn = 0; devfn < 256; devfn++) {
-			unsigned int device = PCI_SLOT(devfn);
-			unsigned int func = PCI_FUNC(devfn);
-
-			if (obp_find(pbm->prom_node, bus, devfn)) {
-				int err = pdev_htab_add(devhandle, bus,
-							device, func);
-				if (err)
-					return err;
-			}
-		}
-	}
-
-	return 0;
-}
-
-static struct pdev_entry *pdev_find(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
-{
-	struct pdev_entry *p;
-
-	p = pdev_htab[pdev_hashfn(devhandle, bus, device, func)];
-	while (p) {
-		if (p->devhandle == devhandle &&
-		    p->bus == bus &&
-		    p->device == device &&
-		    p->func == func)
-			break;
-
-		p = p->next;
-	}
-
-	return p;
-}
-
 static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func)
 {
 	if (bus < pbm->pci_first_busno ||
 	    bus > pbm->pci_last_busno)
 		return 1;
-	return pdev_find(pbm->devhandle, bus, device, func) == NULL;
+	return 0;
 }
 
 static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
@@ -738,6 +612,9 @@ static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
 	unsigned int func = PCI_FUNC(devfn);
 	unsigned long ret;
 
+	if (bus_dev == pbm->pci_bus && devfn == 0x00)
+		return pci_host_bridge_read_pci_cfg(bus_dev, devfn, where,
+						    size, value);
 	if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
 		ret = ~0UL;
 	} else {
@@ -776,6 +653,9 @@ static int pci_sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
 	unsigned int func = PCI_FUNC(devfn);
 	unsigned long ret;
 
+	if (bus_dev == pbm->pci_bus && devfn == 0x00)
+		return pci_host_bridge_write_pci_cfg(bus_dev, devfn, where,
+						     size, value);
 	if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
 		/* Do nothing. */
 	} else {
@@ -800,27 +680,7 @@ static struct pci_ops pci_sun4v_ops = {
 static void pbm_scan_bus(struct pci_controller_info *p,
 			 struct pci_pbm_info *pbm)
 {
-	struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
-
-	if (!cookie) {
-		prom_printf("%s: Critical allocation failure.\n", pbm->name);
-		prom_halt();
-	}
-
-	/* All we care about is the PBM. */
-	cookie->pbm = pbm;
-
-	pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, p->pci_ops, pbm);
-#if 0
-	pci_fixup_host_bridge_self(pbm->pci_bus);
-	pbm->pci_bus->self->sysdata = cookie;
-#endif
-	pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node);
-	pci_record_assignments(pbm, pbm->pci_bus);
-	pci_assign_unassigned(pbm, pbm->pci_bus);
-	pci_fixup_irq(pbm, pbm->pci_bus);
-	pci_determine_66mhz_disposition(pbm, pbm->pci_bus);
-	pci_setup_busmastering(pbm, pbm->pci_bus);
+	pbm->pci_bus = pci_scan_one_pbm(pbm);
 }
 
 static void pci_sun4v_scan_bus(struct pci_controller_info *p)
@@ -844,130 +704,10 @@ static void pci_sun4v_scan_bus(struct pci_controller_info *p)
 	/* XXX register error interrupt handlers XXX */
 }
 
-static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource)
-{
-	struct pcidev_cookie *pcp = pdev->sysdata;
-	struct pci_pbm_info *pbm = pcp->pbm;
-	struct resource *res, *root;
-	u32 reg;
-	int where, size, is_64bit;
-
-	res = &pdev->resource[resource];
-	if (resource < 6) {
-		where = PCI_BASE_ADDRESS_0 + (resource * 4);
-	} else if (resource == PCI_ROM_RESOURCE) {
-		where = pdev->rom_base_reg;
-	} else {
-		/* Somebody might have asked allocation of a non-standard resource */
-		return;
-	}
-
-	/* XXX 64-bit MEM handling is not %100 correct... XXX */
-	is_64bit = 0;
-	if (res->flags & IORESOURCE_IO)
-		root = &pbm->io_space;
-	else {
-		root = &pbm->mem_space;
-		if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
-		    == PCI_BASE_ADDRESS_MEM_TYPE_64)
-			is_64bit = 1;
-	}
-
-	size = res->end - res->start;
-	pci_read_config_dword(pdev, where, &reg);
-	reg = ((reg & size) |
-	       (((u32)(res->start - root->start)) & ~size));
-	if (resource == PCI_ROM_RESOURCE) {
-		reg |= PCI_ROM_ADDRESS_ENABLE;
-		res->flags |= IORESOURCE_ROM_ENABLE;
-	}
-	pci_write_config_dword(pdev, where, reg);
-
-	/* This knows that the upper 32-bits of the address
-	 * must be zero.  Our PCI common layer enforces this.
-	 */
-	if (is_64bit)
-		pci_write_config_dword(pdev, where + 4, 0);
-}
-
-static void pci_sun4v_resource_adjust(struct pci_dev *pdev,
-				      struct resource *res,
-				      struct resource *root)
-{
-	res->start += root->start;
-	res->end += root->start;
-}
-
-/* Use ranges property to determine where PCI MEM, I/O, and Config
- * space are for this PCI bus module.
- */
-static void pci_sun4v_determine_mem_io_space(struct pci_pbm_info *pbm)
-{
-	int i, saw_mem, saw_io;
-
-	saw_mem = saw_io = 0;
-	for (i = 0; i < pbm->num_pbm_ranges; i++) {
-		struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i];
-		unsigned long a;
-		int type;
-
-		type = (pr->child_phys_hi >> 24) & 0x3;
-		a = (((unsigned long)pr->parent_phys_hi << 32UL) |
-		     ((unsigned long)pr->parent_phys_lo << 0UL));
-
-		switch (type) {
-		case 1:
-			/* 16-bit IO space, 16MB */
-			pbm->io_space.start = a;
-			pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL);
-			pbm->io_space.flags = IORESOURCE_IO;
-			saw_io = 1;
-			break;
-
-		case 2:
-			/* 32-bit MEM space, 2GB */
-			pbm->mem_space.start = a;
-			pbm->mem_space.end = a + (0x80000000UL - 1UL);
-			pbm->mem_space.flags = IORESOURCE_MEM;
-			saw_mem = 1;
-			break;
-
-		case 3:
-			/* XXX 64-bit MEM handling XXX */
-
-		default:
-			break;
-		};
-	}
-
-	if (!saw_io || !saw_mem) {
-		prom_printf("%s: Fatal error, missing %s PBM range.\n",
-			    pbm->name,
-			    (!saw_io ? "IO" : "MEM"));
-		prom_halt();
-	}
-
-	printk("%s: PCI IO[%lx] MEM[%lx]\n",
-	       pbm->name,
-	       pbm->io_space.start,
-	       pbm->mem_space.start);
-}
-
-static void pbm_register_toplevel_resources(struct pci_controller_info *p,
-					    struct pci_pbm_info *pbm)
-{
-	pbm->io_space.name = pbm->mem_space.name = pbm->name;
-
-	request_resource(&ioport_resource, &pbm->io_space);
-	request_resource(&iomem_resource, &pbm->mem_space);
-	pci_register_legacy_regions(&pbm->io_space,
-				    &pbm->mem_space);
-}
-
 static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
-					    struct pci_iommu *iommu)
+					    struct iommu *iommu)
 {
-	struct pci_iommu_arena *arena = &iommu->arena;
+	struct iommu_arena *arena = &iommu->arena;
 	unsigned long i, cnt = 0;
 	u32 devhandle;
 
@@ -994,7 +734,7 @@ static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
 
 static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
 {
-	struct pci_iommu *iommu = pbm->iommu;
+	struct iommu *iommu = pbm->iommu;
 	struct property *prop;
 	unsigned long num_tsb_entries, sz;
 	u32 vdma[2], dma_mask, dma_offset;
@@ -1281,7 +1021,7 @@ h_error:
 
 static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
 {
-	u32 *val;
+	const u32 *val;
 	int len;
 
 	val = of_get_property(pbm->prom_node, "#msi-eqs", &len);
@@ -1289,16 +1029,16 @@ static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
 		goto no_msi;
 	pbm->msiq_num = *val;
 	if (pbm->msiq_num) {
-		struct msiq_prop {
+		const struct msiq_prop {
 			u32 first_msiq;
 			u32 num_msiq;
 			u32 first_devino;
 		} *mqp;
-		struct msi_range_prop {
+		const struct msi_range_prop {
 			u32 first_msi;
 			u32 num_msi;
 		} *mrng;
-		struct addr_range_prop {
+		const struct addr_range_prop {
 			u32 msi32_high;
 			u32 msi32_low;
 			u32 msi32_len;
@@ -1410,8 +1150,7 @@ static int pci_sun4v_setup_msi_irq(unsigned int *virt_irq_p,
 				   struct pci_dev *pdev,
 				   struct msi_desc *entry)
 {
-	struct pcidev_cookie *pcp = pdev->sysdata;
-	struct pci_pbm_info *pbm = pcp->pbm;
+	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
 	unsigned long devino, msiqid;
 	struct msi_msg msg;
 	int msi_num, err;
@@ -1455,7 +1194,7 @@ static int pci_sun4v_setup_msi_irq(unsigned int *virt_irq_p,
 	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi_num, HV_MSIVALID_VALID))
 		goto out_err;
 
-	pcp->msi_num = msi_num;
+	pdev->dev.archdata.msi_num = msi_num;
 
 	if (entry->msi_attrib.is_64) {
 		msg.address_hi = pbm->msi64_start >> 32;
@@ -1484,12 +1223,11 @@ out_err:
 static void pci_sun4v_teardown_msi_irq(unsigned int virt_irq,
 				       struct pci_dev *pdev)
 {
-	struct pcidev_cookie *pcp = pdev->sysdata;
-	struct pci_pbm_info *pbm = pcp->pbm;
+	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
 	unsigned long msiqid, err;
 	unsigned int msi_num;
 
-	msi_num = pcp->msi_num;
+	msi_num = pdev->dev.archdata.msi_num;
 	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi_num, &msiqid);
 	if (err) {
 		printk(KERN_ERR "%s: getmsiq gives error %lu\n",
@@ -1516,8 +1254,6 @@ static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
 static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node *dp, u32 devhandle)
 {
 	struct pci_pbm_info *pbm;
-	struct property *prop;
-	int len, i;
 
 	if (devhandle & 0x40)
 		pbm = &p->pbm_B;
@@ -1526,7 +1262,6 @@ static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node
 
 	pbm->parent = p;
 	pbm->prom_node = dp;
-	pbm->pci_first_slot = 1;
 
 	pbm->devhandle = devhandle;
 
@@ -1534,39 +1269,17 @@ static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node
 
 	printk("%s: SUN4V PCI Bus Module\n", pbm->name);
 
-	prop = of_find_property(dp, "ranges", &len);
-	pbm->pbm_ranges = prop->value;
-	pbm->num_pbm_ranges =
-		(len / sizeof(struct linux_prom_pci_ranges));
-
-	/* Mask out the top 8 bits of the ranges, leaving the real
-	 * physical address.
-	 */
-	for (i = 0; i < pbm->num_pbm_ranges; i++)
-		pbm->pbm_ranges[i].parent_phys_hi &= 0x0fffffff;
-
-	pci_sun4v_determine_mem_io_space(pbm);
-	pbm_register_toplevel_resources(p, pbm);
-
-	prop = of_find_property(dp, "interrupt-map", &len);
-	pbm->pbm_intmap = prop->value;
-	pbm->num_pbm_intmap =
-		(len / sizeof(struct linux_prom_pci_intmap));
-
-	prop = of_find_property(dp, "interrupt-map-mask", NULL);
-	pbm->pbm_intmask = prop->value;
+	pci_determine_mem_io_space(pbm);
 
 	pci_sun4v_get_bus_range(pbm);
 	pci_sun4v_iommu_init(pbm);
 	pci_sun4v_msi_init(pbm);
-
-	pdev_htab_populate(pbm);
 }
 
 void sun4v_pci_init(struct device_node *dp, char *model_name)
 {
 	struct pci_controller_info *p;
-	struct pci_iommu *iommu;
+	struct iommu *iommu;
 	struct property *prop;
 	struct linux_prom64_registers *regs;
 	u32 devhandle;
@@ -1606,13 +1319,13 @@ void sun4v_pci_init(struct device_node *dp, char *model_name)
 	if (!p)
 		goto fatal_memory_error;
 
-	iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
+	iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
 	if (!iommu)
 		goto fatal_memory_error;
 
 	p->pbm_A.iommu = iommu;
 
-	iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
+	iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
 	if (!iommu)
 		goto fatal_memory_error;
 
@@ -1622,11 +1335,8 @@ void sun4v_pci_init(struct device_node *dp, char *model_name)
 	pci_controller_root = p;
 
 	p->index = pci_num_controllers++;
-	p->pbms_same_domain = 0;
 
 	p->scan_bus = pci_sun4v_scan_bus;
-	p->base_address_update = pci_sun4v_base_address_update;
-	p->resource_adjust = pci_sun4v_resource_adjust;
 #ifdef CONFIG_PCI_MSI
 	p->setup_msi_irq = pci_sun4v_setup_msi_irq;
 	p->teardown_msi_irq = pci_sun4v_teardown_msi_irq;