author		Russell King <rmk+kernel@arm.linux.org.uk>	2015-07-27 08:29:05 -0400
committer	Thierry Reding <treding@nvidia.com>	2015-08-13 10:06:37 -0400
commit		b98e34f0c6f1c4ac7af41afecc4a26f5f2ebe68d (patch)
tree		d9be0064cc9b838dff0aef519dc7d903a9808d19 /drivers/iommu/tegra-smmu.c
parent		9113785c3e918187b6b0c084c60e0344a2f1685c (diff)
iommu/tegra-smmu: Fix unmap() method
The Tegra SMMU unmap path has several problems:

1. as_pte_put() can perform a write-after-free
2. tegra_smmu_unmap() can perform cache maintenance on a page we have
   just freed.
3. when a page table is unmapped, there is no CPU cache maintenance of
   the write clearing the page directory entry, nor is there any
   maintenance of the IOMMU to ensure that it sees the page table has
   gone.

Fix this by getting rid of as_pte_put(), and instead coding the PTE
unmap separately from the PDE unmap, placing the PDE unmap after the
PTE unmap has been completed.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: Thierry Reding <treding@nvidia.com>
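To make the required ordering concrete, here is a minimal user-space
sketch of the pattern the fix establishes: clear and flush the PTE
first, and only afterwards drop the page table's use count, clearing
and flushing the PDE before the page itself is freed. The types and
the flush stubs below are simplified stand-ins for illustration, not
the driver's actual API.

/* Simplified model of the fixed teardown ordering; the flush_*
 * functions are no-op stand-ins for the real dcache/PTC/TLB
 * maintenance calls. */
#include <stdint.h>
#include <stdlib.h>

#define NUM_PDES 1024u

struct as_model {
	uint32_t pd[NUM_PDES];    /* page directory, one PDE per table */
	uint32_t *pt[NUM_PDES];   /* CPU mappings of page-table pages */
	uint32_t count[NUM_PDES]; /* live PTEs per page table */
};

static void flush_cpu_dcache(void *addr, size_t len) { (void)addr; (void)len; }
static void flush_iommu_caches(void) { }

/* Step 1: clear the PTE and flush while the page table is still live. */
static void pte_unmap(struct as_model *as, unsigned int pde, unsigned int pte)
{
	as->pt[pde][pte] = 0;
	flush_cpu_dcache(&as->pt[pde][pte], sizeof(uint32_t));
	flush_iommu_caches();
}

/* Step 2: drop the use count afterwards; on the last PTE, clear and
 * flush the PDE *before* freeing the page it points at, so neither
 * the CPU nor the IOMMU can still reach the freed page table. */
static void pte_put_use(struct as_model *as, unsigned int pde)
{
	if (--as->count[pde] == 0) {
		as->pd[pde] = 0;
		flush_cpu_dcache(&as->pd[pde], sizeof(uint32_t));
		flush_iommu_caches();

		free(as->pt[pde]);
		as->pt[pde] = NULL;
	}
}

The buggy path did the reverse: it freed the page and then wrote the
PTE into it, and cleared the PDE without any maintenance, which is the
write-after-free and stale-cache exposure the message describes.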
Diffstat (limited to 'drivers/iommu/tegra-smmu.c')
-rw-r--r--	drivers/iommu/tegra-smmu.c	37
1 file changed, 23 insertions(+), 14 deletions(-)
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 083354903a1a..a7a7645fb268 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -509,29 +509,35 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
 	return &pt[pte];
 }
 
-static void as_put_pte(struct tegra_smmu_as *as, dma_addr_t iova)
+static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
 {
+	struct tegra_smmu *smmu = as->smmu;
 	u32 pde = (iova >> SMMU_PDE_SHIFT) & 0x3ff;
-	u32 pte = (iova >> SMMU_PTE_SHIFT) & 0x3ff;
 	u32 *count = page_address(as->count);
-	u32 *pd = page_address(as->pd), *pt;
+	u32 *pd = page_address(as->pd);
 	struct page *page;
 
-	page = pfn_to_page(pd[pde] & as->smmu->pfn_mask);
-	pt = page_address(page);
+	page = pfn_to_page(pd[pde] & smmu->pfn_mask);
 
 	/*
 	 * When no entries in this page table are used anymore, return the
 	 * memory page to the system.
 	 */
-	if (pt[pte] != 0) {
-		if (--count[pde] == 0) {
-			ClearPageReserved(page);
-			__free_page(page);
-			pd[pde] = 0;
-		}
+	if (--count[pde] == 0) {
+		unsigned int offset = pde * sizeof(*pd);
 
-		pt[pte] = 0;
+		/* Clear the page directory entry first */
+		pd[pde] = 0;
+
+		/* Flush the page directory entry */
+		smmu->soc->ops->flush_dcache(as->pd, offset, sizeof(*pd));
+		smmu_flush_ptc(smmu, as->pd, offset);
+		smmu_flush_tlb_section(smmu, as->id, iova);
+		smmu_flush(smmu);
+
+		/* Finally, free the page */
+		ClearPageReserved(page);
+		__free_page(page);
 	}
 }
 
@@ -569,17 +575,20 @@ static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
 	u32 *pte;
 
 	pte = as_get_pte(as, iova, &page);
-	if (!pte)
+	if (!pte || !*pte)
 		return 0;
 
+	*pte = 0;
+
 	offset = offset_in_page(pte);
-	as_put_pte(as, iova);
 
 	smmu->soc->ops->flush_dcache(page, offset, 4);
 	smmu_flush_ptc(smmu, page, offset);
 	smmu_flush_tlb_group(smmu, as->id, iova);
 	smmu_flush(smmu);
 
+	tegra_smmu_pte_put_use(as, iova);
+
 	return size;
 }
 