author     Tomasz Figa <tfiga@chromium.org>    2015-04-20 07:43:44 -0400
committer  Joerg Roedel <jroedel@suse.de>      2015-05-05 11:52:11 -0400
commit     d4dd920cc2f6a948f07544caaa22bccb63378a3a
tree       18ccd63fcd755756de6f1dfa8f2664adab9bd1d5  /drivers/iommu/rockchip-iommu.c
parent     5ebe6afaf0057ac3eaeb98defd5456894b446d22
iommu/rockchip: Make sure that page table state is coherent
To flush created mappings, the current mapping code relies on the fact that
during unmap the driver zaps every IOVA being unmapped, and that zapping a
single IOVA of a page table is enough to remove the entire page table from
the IOMMU cache. Based on these assumptions, the driver simply zapped the
first IOVA of the mapping being created. This is enough to invalidate the
first page table, which could be shared with another mapping (and thus could
already be present in the IOMMU cache), but it does nothing about the last
page table, which could be shared with other mappings as well.

Moreover, the flushing is performed before the page table contents are
actually modified, so there is a race between the CPU updating the page
tables and hardware that could be running at the same time and triggering
IOMMU look-ups, which could bring the stale page tables back into the cache.

To fix both issues, this patch makes the mapping code zap the first and last
(if they are different) IOVAs of the new mapping after the page table has
been updated.

Signed-off-by: Tomasz Figa <tfiga@chromium.org>
Reviewed-by: Daniel Kurtz <djkurtz@chromium.org>
Tested-by: Heiko Stuebner <heiko@sntech.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
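In short, the new ordering is: write the page table entries, flush the CPU
writes, and only then invalidate the IOTLB, limiting the invalidation to the
first and last IOVA of the mapping, since only those page tables can be
shared with an existing mapping. The following is a condensed sketch of that
ordering, reusing the driver's own helpers (SPAGE_SIZE, rk_iommu_zap_iova(),
rk_table_flush()) exactly as they appear in the diff below; it is a
restatement for clarity, not a substitute for the full patch:

    /*
     * Only the first and last page table of a mapping can be shared with
     * other mappings, so those are the only IOVAs that need zapping.
     */
    static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain,
                                             dma_addr_t iova, size_t size)
    {
            rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
            if (size > SPAGE_SIZE)
                    rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE,
                                      SPAGE_SIZE);
    }

    /*
     * In rk_iommu_map_iova(), the zap now happens after the PTE writes have
     * been made visible, closing the race with concurrent IOMMU look-ups:
     */
            rk_table_flush(pte_addr, pte_count);
            rk_iommu_zap_iova_first_last(rk_domain, iova, size);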
Diffstat (limited to 'drivers/iommu/rockchip-iommu.c')
-rw-r--r--  drivers/iommu/rockchip-iommu.c  23
1 file changed, 17 insertions(+), 6 deletions(-)
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 4015560bf486..31004c040578 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -551,6 +551,15 @@ static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
         spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
 }
 
+static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain,
+                                        dma_addr_t iova, size_t size)
+{
+        rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
+        if (size > SPAGE_SIZE)
+                rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE,
+                                  SPAGE_SIZE);
+}
+
 static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
                                   dma_addr_t iova)
 {
@@ -575,12 +584,6 @@ static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
         rk_table_flush(page_table, NUM_PT_ENTRIES);
         rk_table_flush(dte_addr, 1);
 
-        /*
-         * Zap the first iova of newly allocated page table so iommu evicts
-         * old cached value of new dte from the iotlb.
-         */
-        rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
-
 done:
         pt_phys = rk_dte_pt_address(dte);
         return (u32 *)phys_to_virt(pt_phys);
@@ -630,6 +633,14 @@ static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
 
         rk_table_flush(pte_addr, pte_count);
 
+        /*
+         * Zap the first and last iova to evict from iotlb any previously
+         * mapped cachelines holding stale values for its dte and pte.
+         * We only zap the first and last iova, since only they could have
+         * dte or pte shared with an existing mapping.
+         */
+        rk_iommu_zap_iova_first_last(rk_domain, iova, size);
+
         return 0;
 unwind:
         /* Unmap the range of iovas that we just mapped */