about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author: Russell King <rmk+kernel@arm.linux.org.uk> 2015-07-27 08:29:26 -0400
committer: Thierry Reding <treding@nvidia.com> 2015-08-13 10:06:39 -0400
commit: 853520fa96511e4a49942d2cba34a329528c7e41 (patch)
tree: b34b8dce2abde0c9ef555f509148ffa8c5708da2
parent: 0b42c7c1132f331fba263f0d2ca23544770584b7 (diff)
iommu/tegra-smmu: Store struct page pointer for page tables
Store the struct page pointer for the second level page tables, rather than working back from the page directory entry. This is necessary as we want to eliminate the use of physical addresses used with arch-private functions, switching instead to use the streaming DMA API. Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk> Signed-off-by: Thierry Reding <treding@nvidia.com>
-rw-r--r-- drivers/iommu/tegra-smmu.c | 27
1 file changed, 17 insertions(+), 10 deletions(-)
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index bbff5b647183..8ec5ac45caab 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -41,6 +41,7 @@ struct tegra_smmu_as {
41 struct tegra_smmu *smmu; 41 struct tegra_smmu *smmu;
42 unsigned int use_count; 42 unsigned int use_count;
43 struct page *count; 43 struct page *count;
44 struct page **pts;
44 struct page *pd; 45 struct page *pd;
45 unsigned id; 46 unsigned id;
46 u32 attr; 47 u32 attr;
@@ -271,6 +272,14 @@ static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
271 return NULL; 272 return NULL;
272 } 273 }
273 274
275 as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
276 if (!as->pts) {
277 __free_page(as->count);
278 __free_page(as->pd);
279 kfree(as);
280 return NULL;
281 }
282
274 /* clear PDEs */ 283 /* clear PDEs */
275 pd = page_address(as->pd); 284 pd = page_address(as->pd);
276 SetPageReserved(as->pd); 285 SetPageReserved(as->pd);
@@ -487,14 +496,11 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
487{ 496{
488 unsigned int pd_index = iova_pd_index(iova); 497 unsigned int pd_index = iova_pd_index(iova);
489 struct page *pt_page; 498 struct page *pt_page;
490 u32 *pd;
491 499
492 pd = page_address(as->pd); 500 pt_page = as->pts[pd_index];
493 501 if (!pt_page)
494 if (!pd[pd_index])
495 return NULL; 502 return NULL;
496 503
497 pt_page = pfn_to_page(pd[pd_index] & as->smmu->pfn_mask);
498 *pagep = pt_page; 504 *pagep = pt_page;
499 505
500 return tegra_smmu_pte_offset(pt_page, iova); 506 return tegra_smmu_pte_offset(pt_page, iova);
@@ -509,7 +515,7 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
509 struct page *page; 515 struct page *page;
510 unsigned int i; 516 unsigned int i;
511 517
512 if (pd[pde] == 0) { 518 if (!as->pts[pde]) {
513 page = alloc_page(GFP_KERNEL | __GFP_DMA); 519 page = alloc_page(GFP_KERNEL | __GFP_DMA);
514 if (!page) 520 if (!page)
515 return NULL; 521 return NULL;
@@ -520,6 +526,8 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
520 for (i = 0; i < SMMU_NUM_PTE; i++) 526 for (i = 0; i < SMMU_NUM_PTE; i++)
521 pt[i] = 0; 527 pt[i] = 0;
522 528
529 as->pts[pde] = page;
530
523 smmu->soc->ops->flush_dcache(page, 0, SMMU_SIZE_PT); 531 smmu->soc->ops->flush_dcache(page, 0, SMMU_SIZE_PT);
524 532
525 pd[pde] = SMMU_MK_PDE(page, SMMU_PDE_ATTR | SMMU_PDE_NEXT); 533 pd[pde] = SMMU_MK_PDE(page, SMMU_PDE_ATTR | SMMU_PDE_NEXT);
@@ -529,7 +537,7 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
529 smmu_flush_tlb_section(smmu, as->id, iova); 537 smmu_flush_tlb_section(smmu, as->id, iova);
530 smmu_flush(smmu); 538 smmu_flush(smmu);
531 } else { 539 } else {
532 page = pfn_to_page(pd[pde] & smmu->pfn_mask); 540 page = as->pts[pde];
533 } 541 }
534 542
535 *pagep = page; 543 *pagep = page;
@@ -550,9 +558,7 @@ static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
550 unsigned int pde = iova_pd_index(iova); 558 unsigned int pde = iova_pd_index(iova);
551 u32 *count = page_address(as->count); 559 u32 *count = page_address(as->count);
552 u32 *pd = page_address(as->pd); 560 u32 *pd = page_address(as->pd);
553 struct page *page; 561 struct page *page = as->pts[pde];
554
555 page = pfn_to_page(pd[pde] & smmu->pfn_mask);
556 562
557 /* 563 /*
558 * When no entries in this page table are used anymore, return the 564 * When no entries in this page table are used anymore, return the
@@ -573,6 +579,7 @@ static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
573 /* Finally, free the page */ 579 /* Finally, free the page */
574 ClearPageReserved(page); 580 ClearPageReserved(page);
575 __free_page(page); 581 __free_page(page);
582 as->pts[pde] = NULL;
576 } 583 }
577} 584}
578 585