Diffstat (limited to 'arch/powerpc/platforms/powernv/pci-ioda.c')
-rw-r--r--	arch/powerpc/platforms/powernv/pci-ioda.c | 84
1 file changed, 84 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 7d6dcc6d5fa9..3b2b4fb3585b 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -21,6 +21,7 @@
 #include <linux/irq.h>
 #include <linux/io.h>
 #include <linux/msi.h>
+#include <linux/memblock.h>
 
 #include <asm/sections.h>
 #include <asm/io.h>
@@ -460,9 +461,39 @@ static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev
 		return;
 
 	pe = &phb->ioda.pe_array[pdn->pe_number];
+	WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
 	set_iommu_table_base_and_group(&pdev->dev, &pe->tce32_table);
 }
 
+static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb,
+				     struct pci_dev *pdev, u64 dma_mask)
+{
+	struct pci_dn *pdn = pci_get_pdn(pdev);
+	struct pnv_ioda_pe *pe;
+	uint64_t top;
+	bool bypass = false;
+
+	if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
+		return -ENODEV;
+
+	pe = &phb->ioda.pe_array[pdn->pe_number];
+	if (pe->tce_bypass_enabled) {
+		top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1;
+		bypass = (dma_mask >= top);
+	}
+
+	if (bypass) {
+		dev_info(&pdev->dev, "Using 64-bit DMA iommu bypass\n");
+		set_dma_ops(&pdev->dev, &dma_direct_ops);
+		set_dma_offset(&pdev->dev, pe->tce_bypass_base);
+	} else {
+		dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n");
+		set_dma_ops(&pdev->dev, &dma_iommu_ops);
+		set_iommu_table_base(&pdev->dev, &pe->tce32_table);
+	}
+	return 0;
+}
+
 static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
 {
 	struct pci_dev *dev;
@@ -657,6 +688,56 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
 		__free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs));
 }
 
+static void pnv_pci_ioda2_set_bypass(struct iommu_table *tbl, bool enable)
+{
+	struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe,
+					      tce32_table);
+	uint16_t window_id = (pe->pe_number << 1) + 1;
+	int64_t rc;
+
+	pe_info(pe, "%sabling 64-bit DMA bypass\n", enable ? "En" : "Dis");
+	if (enable) {
+		phys_addr_t top = memblock_end_of_DRAM();
+
+		top = roundup_pow_of_two(top);
+		rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
+						     pe->pe_number,
+						     window_id,
+						     pe->tce_bypass_base,
+						     top);
+	} else {
+		rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
+						     pe->pe_number,
+						     window_id,
+						     pe->tce_bypass_base,
+						     0);
+
+		/*
+		 * We might want to reset the DMA ops of all devices on
+		 * this PE. However in theory, that shouldn't be necessary
+		 * as this is used for VFIO/KVM pass-through and the device
+		 * hasn't yet been returned to its kernel driver.
+		 */
+	}
+	if (rc)
+		pe_err(pe, "OPAL error %lld configuring bypass window\n", rc);
+	else
+		pe->tce_bypass_enabled = enable;
+}
+
+static void pnv_pci_ioda2_setup_bypass_pe(struct pnv_phb *phb,
+					  struct pnv_ioda_pe *pe)
+{
+	/* TVE #1 is selected by PCI address bit 59 */
+	pe->tce_bypass_base = 1ull << 59;
+
+	/* Install set_bypass callback for VFIO */
+	pe->tce32_table.set_bypass = pnv_pci_ioda2_set_bypass;
+
+	/* Enable bypass by default */
+	pnv_pci_ioda2_set_bypass(&pe->tce32_table, true);
+}
+
 static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
 				       struct pnv_ioda_pe *pe)
 {
@@ -727,6 +808,8 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
 	else
 		pnv_ioda_setup_bus_dma(pe, pe->pbus);
 
+	/* Also create a bypass window */
+	pnv_pci_ioda2_setup_bypass_pe(phb, pe);
 	return;
 fail:
 	if (pe->tce32_seg >= 0)
@@ -1286,6 +1369,7 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np,
 
 	/* Setup TCEs */
 	phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
+	phb->dma_set_mask = pnv_pci_ioda_dma_set_mask;
 
 	/* Setup shutdown function for kexec */
 	phb->shutdown = pnv_pci_ioda_shutdown;
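
The bypass decision added in pnv_pci_ioda_dma_set_mask() above comes down to a single comparison: the device's DMA mask must cover the top of the TVE#1 window, i.e. the bypass base at PCI address bit 59 plus the end of system DRAM. The standalone C sketch below illustrates only that comparison; the 32 GB DRAM size and the names DRAM_END and can_bypass are made-up stand-ins for memblock_end_of_DRAM() and the in-kernel logic, not part of the patch.

	#include <stdio.h>
	#include <stdint.h>
	#include <stdbool.h>

	/* TVE #1 is selected by PCI address bit 59, as in the patch above. */
	#define TCE_BYPASS_BASE	(1ULL << 59)

	/* Hypothetical end of system DRAM, standing in for memblock_end_of_DRAM(). */
	#define DRAM_END	(32ULL << 30)	/* 32 GB */

	/*
	 * Mirrors the check in pnv_pci_ioda_dma_set_mask(): bypass is usable
	 * only if the device's DMA mask reaches the top of the bypass window.
	 */
	static bool can_bypass(uint64_t dma_mask)
	{
		uint64_t top = TCE_BYPASS_BASE + DRAM_END - 1;

		return dma_mask >= top;
	}

	int main(void)
	{
		printf("64-bit mask: %s\n", can_bypass(~0ULL) ? "bypass" : "iommu");
		printf("32-bit mask: %s\n", can_bypass(0xffffffffULL) ? "bypass" : "iommu");
		return 0;
	}

Run standalone, this prints "bypass" for a full 64-bit mask and "iommu" for a 32-bit mask, which is why the patch keeps dma_iommu_ops and the 32-bit TCE table for devices that cannot address the high bypass window.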