author     Lu Baolu <baolu.lu@linux.intel.com>      2019-09-06 02:14:51 -0400
committer  Joerg Roedel <jroedel@suse.de>           2019-09-11 06:34:30 -0400
commit     3b53034c268d550d9e8522e613a14ab53b8840d8 (patch)
tree       a911b80bfe356337d238226ee5641723d60ff567 /drivers/iommu/intel-iommu.c
parent     c5a5dc4cbbf4540c1891cdb2b70cf469405ea61f (diff)
iommu/vt-d: Add trace events for device dma map/unmap
This adds trace support for the Intel IOMMU driver. It also declares some events which can be used to trace when an IOVA is being mapped or unmapped in a domain.

Cc: Ashok Raj <ashok.raj@intel.com>
Cc: Jacob Pan <jacob.jun.pan@linux.intel.com>
Cc: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
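Note: the trace_map_single(), trace_unmap_single(), trace_map_sg() and trace_unmap_sg() calls added in the diff below are generated from tracepoint declarations that live in a separate header, which is outside this diffstat. As a rough sketch of how such a declaration could look, assuming the usual DECLARE_EVENT_CLASS/DEFINE_EVENT boilerplate and a trace system named intel_iommu (the header layout, class name and field set here are illustrative, not necessarily what the commit adds):

/* Illustrative sketch only -- the real event header is not part of this
 * diffstat.  It follows the standard kernel tracepoint pattern.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM intel_iommu

#if !defined(_TRACE_INTEL_IOMMU_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_INTEL_IOMMU_H

#include <linux/tracepoint.h>
#include <linux/device.h>

/* One event class for single-buffer map events; trace_map_single() in the
 * diff below would be one DEFINE_EVENT() instance of it.
 */
DECLARE_EVENT_CLASS(dma_map,
        TP_PROTO(struct device *dev, dma_addr_t dev_addr,
                 phys_addr_t phys_addr, size_t size),

        TP_ARGS(dev, dev_addr, phys_addr, size),

        TP_STRUCT__entry(
                __string(dev_name, dev_name(dev))
                __field(dma_addr_t, dev_addr)
                __field(phys_addr_t, phys_addr)
                __field(size_t, size)
        ),

        TP_fast_assign(
                __assign_str(dev_name, dev_name(dev));
                __entry->dev_addr = dev_addr;
                __entry->phys_addr = phys_addr;
                __entry->size = size;
        ),

        TP_printk("dev=%s dev_addr=0x%llx phys_addr=0x%llx size=%zu",
                  __get_str(dev_name),
                  (unsigned long long)__entry->dev_addr,
                  (unsigned long long)__entry->phys_addr,
                  __entry->size)
);

DEFINE_EVENT(dma_map, map_single,
        TP_PROTO(struct device *dev, dma_addr_t dev_addr,
                 phys_addr_t phys_addr, size_t size),
        TP_ARGS(dev, dev_addr, phys_addr, size)
);

/* unmap_single/unmap_sg take (dev, dev_addr, size) and would use a second,
 * three-argument event class of the same shape.
 */

#endif /* _TRACE_INTEL_IOMMU_H */

/* This include must stay outside the guard. */
#include <trace/define_trace.h>

Assuming a system name of intel_iommu as above, the events would appear under events/intel_iommu/ in tracefs, where they can be enabled individually; tracepoint call sites compile down to static branches, so the added calls cost essentially nothing while the events are disabled.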
Diffstat (limited to 'drivers/iommu/intel-iommu.c')
-rw-r--r--   drivers/iommu/intel-iommu.c   13
1 file changed, 10 insertions, 3 deletions
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 0af7b4669264..12831beead02 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3512,6 +3512,9 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
 
         start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
         start_paddr += paddr & ~PAGE_MASK;
+
+        trace_map_single(dev, start_paddr, paddr, size << VTD_PAGE_SHIFT);
+
         return start_paddr;
 
 error:
@@ -3567,10 +3570,7 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
         if (dev_is_pci(dev))
                 pdev = to_pci_dev(dev);
 
-        dev_dbg(dev, "Device unmapping: pfn %lx-%lx\n", start_pfn, last_pfn);
-
         freelist = domain_unmap(domain, start_pfn, last_pfn);
-
         if (intel_iommu_strict || (pdev && pdev->untrusted) ||
             !has_iova_flush_queue(&domain->iovad)) {
                 iommu_flush_iotlb_psi(iommu, domain, start_pfn,
@@ -3586,6 +3586,8 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
                  * cpu used up by the iotlb flush operation...
                  */
         }
+
+        trace_unmap_single(dev, dev_addr, size);
 }
 
 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
@@ -3676,6 +3678,8 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
         }
 
         intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
+
+        trace_unmap_sg(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
 }
 
 static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
@@ -3732,6 +3736,9 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
                 return 0;
         }
 
+        trace_map_sg(dev, iova_pfn << PAGE_SHIFT,
+                     sg_phys(sglist), size << VTD_PAGE_SHIFT);
+
         return nelems;
 }
 