about summary refs log tree commit diff stats
path: root/arch/sparc64/kernel/pci_sabre.c
diff options
context:
space:
mode:
author: David S. Miller <davem@sunset.davemloft.net> 2007-07-28 01:39:14 -0400
committer: David S. Miller <davem@sunset.davemloft.net> 2007-07-30 03:27:34 -0400
commit: ad7ad57c6127042c411353dddb723765964815db (patch)
tree: 600484291d9cfa68d54dc9b230f5bd115f495213 /arch/sparc64/kernel/pci_sabre.c
parent: c7f439b99efbea74c70a5531f92566db5a6731f2 (diff)
[SPARC64]: Fix conflicts in SBUS/PCI/EBUS/ISA DMA handling.
Fully unify all of the DMA ops so that subordinate bus types to the DMA operation providers (such as ebus, isa, of_device) can work transparently. Basically, we just make sure that for every system device we create, the dev->archdata 'iommu' and 'stc' fields are filled in. Then we have two platform variants of the DMA ops, one for SUN4U which actually programs the real hardware, and one for SUN4V which makes hypervisor calls. This also fixes the crashes in parport_pc on sparc64, reported by Meelis Roos. Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc64/kernel/pci_sabre.c')
-rw-r--r--arch/sparc64/kernel/pci_sabre.c35
1 files changed, 22 insertions, 13 deletions
diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c
index 22e1be5c7489..fba67c3d8809 100644
--- a/arch/sparc64/kernel/pci_sabre.c
+++ b/arch/sparc64/kernel/pci_sabre.c
@@ -672,18 +672,20 @@ static void sabre_scan_bus(struct pci_pbm_info *pbm)
672 sabre_register_error_handlers(pbm); 672 sabre_register_error_handlers(pbm);
673} 673}
674 674
675static void sabre_iommu_init(struct pci_pbm_info *pbm, 675static int sabre_iommu_init(struct pci_pbm_info *pbm,
676 int tsbsize, unsigned long dvma_offset, 676 int tsbsize, unsigned long dvma_offset,
677 u32 dma_mask) 677 u32 dma_mask)
678{ 678{
679 struct iommu *iommu = pbm->iommu; 679 struct iommu *iommu = pbm->iommu;
680 unsigned long i; 680 unsigned long i;
681 u64 control; 681 u64 control;
682 int err;
682 683
683 /* Register addresses. */ 684 /* Register addresses. */
684 iommu->iommu_control = pbm->controller_regs + SABRE_IOMMU_CONTROL; 685 iommu->iommu_control = pbm->controller_regs + SABRE_IOMMU_CONTROL;
685 iommu->iommu_tsbbase = pbm->controller_regs + SABRE_IOMMU_TSBBASE; 686 iommu->iommu_tsbbase = pbm->controller_regs + SABRE_IOMMU_TSBBASE;
686 iommu->iommu_flush = pbm->controller_regs + SABRE_IOMMU_FLUSH; 687 iommu->iommu_flush = pbm->controller_regs + SABRE_IOMMU_FLUSH;
688 iommu->iommu_tags = iommu->iommu_flush + (0xa580UL - 0x0210UL);
687 iommu->write_complete_reg = pbm->controller_regs + SABRE_WRSYNC; 689 iommu->write_complete_reg = pbm->controller_regs + SABRE_WRSYNC;
688 /* Sabre's IOMMU lacks ctx flushing. */ 690 /* Sabre's IOMMU lacks ctx flushing. */
689 iommu->iommu_ctxflush = 0; 691 iommu->iommu_ctxflush = 0;
@@ -701,7 +703,10 @@ static void sabre_iommu_init(struct pci_pbm_info *pbm,
701 /* Leave diag mode enabled for full-flushing done 703 /* Leave diag mode enabled for full-flushing done
702 * in pci_iommu.c 704 * in pci_iommu.c
703 */ 705 */
704 pci_iommu_table_init(iommu, tsbsize * 1024 * 8, dvma_offset, dma_mask); 706 err = iommu_table_init(iommu, tsbsize * 1024 * 8,
707 dvma_offset, dma_mask);
708 if (err)
709 return err;
705 710
706 sabre_write(pbm->controller_regs + SABRE_IOMMU_TSBBASE, 711 sabre_write(pbm->controller_regs + SABRE_IOMMU_TSBBASE,
707 __pa(iommu->page_table)); 712 __pa(iommu->page_table));
@@ -722,6 +727,8 @@ static void sabre_iommu_init(struct pci_pbm_info *pbm,
722 break; 727 break;
723 } 728 }
724 sabre_write(pbm->controller_regs + SABRE_IOMMU_CONTROL, control); 729 sabre_write(pbm->controller_regs + SABRE_IOMMU_CONTROL, control);
730
731 return 0;
725} 732}
726 733
727static void sabre_pbm_init(struct pci_controller_info *p, struct pci_pbm_info *pbm, struct device_node *dp) 734static void sabre_pbm_init(struct pci_controller_info *p, struct pci_pbm_info *pbm, struct device_node *dp)
@@ -775,16 +782,12 @@ void sabre_init(struct device_node *dp, char *model_name)
775 } 782 }
776 783
777 p = kzalloc(sizeof(*p), GFP_ATOMIC); 784 p = kzalloc(sizeof(*p), GFP_ATOMIC);
778 if (!p) { 785 if (!p)
779 prom_printf("SABRE: Error, kmalloc(pci_controller_info) failed.\n"); 786 goto fatal_memory_error;
780 prom_halt();
781 }
782 787
783 iommu = kzalloc(sizeof(*iommu), GFP_ATOMIC); 788 iommu = kzalloc(sizeof(*iommu), GFP_ATOMIC);
784 if (!iommu) { 789 if (!iommu)
785 prom_printf("SABRE: Error, kmalloc(pci_iommu) failed.\n"); 790 goto fatal_memory_error;
786 prom_halt();
787 }
788 pbm = &p->pbm_A; 791 pbm = &p->pbm_A;
789 pbm->iommu = iommu; 792 pbm->iommu = iommu;
790 793
@@ -847,10 +850,16 @@ void sabre_init(struct device_node *dp, char *model_name)
847 prom_halt(); 850 prom_halt();
848 } 851 }
849 852
850 sabre_iommu_init(pbm, tsbsize, vdma[0], dma_mask); 853 if (sabre_iommu_init(pbm, tsbsize, vdma[0], dma_mask))
854 goto fatal_memory_error;
851 855
852 /* 856 /*
853 * Look for APB underneath. 857 * Look for APB underneath.
854 */ 858 */
855 sabre_pbm_init(p, pbm, dp); 859 sabre_pbm_init(p, pbm, dp);
860 return;
861
862fatal_memory_error:
863 prom_printf("SABRE: Fatal memory allocation error.\n");
864 prom_halt();
856} 865}