diff options
author | Russell King <rmk+kernel@arm.linux.org.uk> | 2015-07-27 08:29:21 -0400 |
---|---|---|
committer | Thierry Reding <treding@nvidia.com> | 2015-08-13 10:06:39 -0400 |
commit | 0b42c7c1132f331fba263f0d2ca23544770584b7 (patch) | |
tree | 70277b41ac6145870f6e4f3640bb36151eba9da7 | |
parent | 34d35f8cbe51bf93faf3214ee5b5d6f8ae7df4c1 (diff) |
iommu/tegra-smmu: Fix page table lookup in unmap/iova_to_phys methods
Fix the page table lookup in the unmap and iova_to_phys methods.
Neither of these methods should allocate a page table; a missing page
table should be treated the same as no mapping present.
More importantly, using as_get_pte() for an IOVA corresponding to a
non-present page table entry increments the use-count for the page
table, on the assumption that the caller of as_get_pte() is going to
setup a mapping. This is an incorrect assumption.
Fix both of these bugs by providing a separate helper which only looks
up the page table, but never allocates it. This is akin to pte_offset()
for CPU page tables.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: Thierry Reding <treding@nvidia.com>
-rw-r--r-- | drivers/iommu/tegra-smmu.c | 43 |
1 file changed, 34 insertions(+), 9 deletions(-)
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index 4c4bc7966046..bbff5b647183 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c | |||
@@ -475,12 +475,36 @@ static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *de | |||
475 | } | 475 | } |
476 | } | 476 | } |
477 | 477 | ||
478 | static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova) | ||
479 | { | ||
480 | u32 *pt = page_address(pt_page); | ||
481 | |||
482 | return pt + iova_pt_index(iova); | ||
483 | } | ||
484 | |||
485 | static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova, | ||
486 | struct page **pagep) | ||
487 | { | ||
488 | unsigned int pd_index = iova_pd_index(iova); | ||
489 | struct page *pt_page; | ||
490 | u32 *pd; | ||
491 | |||
492 | pd = page_address(as->pd); | ||
493 | |||
494 | if (!pd[pd_index]) | ||
495 | return NULL; | ||
496 | |||
497 | pt_page = pfn_to_page(pd[pd_index] & as->smmu->pfn_mask); | ||
498 | *pagep = pt_page; | ||
499 | |||
500 | return tegra_smmu_pte_offset(pt_page, iova); | ||
501 | } | ||
502 | |||
478 | static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova, | 503 | static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova, |
479 | struct page **pagep) | 504 | struct page **pagep) |
480 | { | 505 | { |
481 | u32 *pd = page_address(as->pd), *pt, *count; | 506 | u32 *pd = page_address(as->pd), *pt, *count; |
482 | unsigned int pde = iova_pd_index(iova); | 507 | unsigned int pde = iova_pd_index(iova); |
483 | unsigned int pte = iova_pt_index(iova); | ||
484 | struct tegra_smmu *smmu = as->smmu; | 508 | struct tegra_smmu *smmu = as->smmu; |
485 | struct page *page; | 509 | struct page *page; |
486 | unsigned int i; | 510 | unsigned int i; |
@@ -506,17 +530,18 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova, | |||
506 | smmu_flush(smmu); | 530 | smmu_flush(smmu); |
507 | } else { | 531 | } else { |
508 | page = pfn_to_page(pd[pde] & smmu->pfn_mask); | 532 | page = pfn_to_page(pd[pde] & smmu->pfn_mask); |
509 | pt = page_address(page); | ||
510 | } | 533 | } |
511 | 534 | ||
512 | *pagep = page; | 535 | *pagep = page; |
513 | 536 | ||
537 | pt = page_address(page); | ||
538 | |||
514 | /* Keep track of entries in this page table. */ | 539 | /* Keep track of entries in this page table. */ |
515 | count = page_address(as->count); | 540 | count = page_address(as->count); |
516 | if (pt[pte] == 0) | 541 | if (pt[iova_pt_index(iova)] == 0) |
517 | count[pde]++; | 542 | count[pde]++; |
518 | 543 | ||
519 | return &pt[pte]; | 544 | return tegra_smmu_pte_offset(page, iova); |
520 | } | 545 | } |
521 | 546 | ||
522 | static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova) | 547 | static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova) |
@@ -586,14 +611,14 @@ static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova, | |||
586 | size_t size) | 611 | size_t size) |
587 | { | 612 | { |
588 | struct tegra_smmu_as *as = to_smmu_as(domain); | 613 | struct tegra_smmu_as *as = to_smmu_as(domain); |
589 | struct page *page; | 614 | struct page *pte_page; |
590 | u32 *pte; | 615 | u32 *pte; |
591 | 616 | ||
592 | pte = as_get_pte(as, iova, &page); | 617 | pte = tegra_smmu_pte_lookup(as, iova, &pte_page); |
593 | if (!pte || !*pte) | 618 | if (!pte || !*pte) |
594 | return 0; | 619 | return 0; |
595 | 620 | ||
596 | tegra_smmu_set_pte(as, iova, pte, page, 0); | 621 | tegra_smmu_set_pte(as, iova, pte, pte_page, 0); |
597 | tegra_smmu_pte_put_use(as, iova); | 622 | tegra_smmu_pte_put_use(as, iova); |
598 | 623 | ||
599 | return size; | 624 | return size; |
@@ -603,11 +628,11 @@ static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain, | |||
603 | dma_addr_t iova) | 628 | dma_addr_t iova) |
604 | { | 629 | { |
605 | struct tegra_smmu_as *as = to_smmu_as(domain); | 630 | struct tegra_smmu_as *as = to_smmu_as(domain); |
606 | struct page *page; | 631 | struct page *pte_page; |
607 | unsigned long pfn; | 632 | unsigned long pfn; |
608 | u32 *pte; | 633 | u32 *pte; |
609 | 634 | ||
610 | pte = as_get_pte(as, iova, &page); | 635 | pte = tegra_smmu_pte_lookup(as, iova, &pte_page); |
611 | if (!pte || !*pte) | 636 | if (!pte || !*pte) |
612 | return 0; | 637 | return 0; |
613 | 638 | ||