author     Russell King <rmk+kernel@arm.linux.org.uk>	2015-07-27 08:29:31 -0400
committer  Thierry Reding <treding@nvidia.com>	2015-08-13 10:06:40 -0400
commit     32924c76b0cbc67aa4cf0741f7bc6c37f097aaf3
tree       ac81012aeb76c50918a59aed8c4dcac85561ded6
parent     853520fa96511e4a49942d2cba34a329528c7e41
iommu/tegra-smmu: Use kcalloc() to allocate counter array
Use kcalloc() to allocate the use-counter array for the page directory
entries/page tables. Using kcalloc() allows us to be provided with
zero-initialised memory from the allocators, rather than initialising
it ourselves.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: Thierry Reding <treding@nvidia.com>
 drivers/iommu/tegra-smmu.c | 21 ++++++---------------
 1 file changed, 6 insertions(+), 15 deletions(-)
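For illustration only, here is a minimal sketch of the before/after allocation pattern this patch applies. The names (NUM_COUNTERS, counters_alloc_page(), counters_alloc()) are hypothetical stand-ins, not code from the driver; the point is that kcalloc() returns memory that is already zeroed (and checks the count * size multiplication for overflow), so the separate page allocation and manual clearing loop become unnecessary.

/*
 * Sketch only: NUM_COUNTERS and the two helpers below are made-up names,
 * not code from drivers/iommu/tegra-smmu.c.
 */
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/types.h>

#define NUM_COUNTERS	1024	/* stand-in for SMMU_NUM_PDE */

/* Before: allocate a whole page, keep the struct page, zero it by hand. */
static struct page *counters_alloc_page(void)
{
	struct page *page = alloc_page(GFP_KERNEL);
	u32 *count;
	unsigned int i;

	if (!page)
		return NULL;

	count = page_address(page);
	for (i = 0; i < NUM_COUNTERS; i++)
		count[i] = 0;

	return page;		/* released with __free_page(page) */
}

/* After: kcalloc() hands back zero-initialised memory sized to the array. */
static u32 *counters_alloc(void)
{
	return kcalloc(NUM_COUNTERS, sizeof(u32), GFP_KERNEL);	/* released with kfree() */
}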
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 8ec5ac45caab..d649b06cc4ca 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -40,7 +40,7 @@ struct tegra_smmu_as {
 	struct iommu_domain domain;
 	struct tegra_smmu *smmu;
 	unsigned int use_count;
-	struct page *count;
+	u32 *count;
 	struct page **pts;
 	struct page *pd;
 	unsigned id;
@@ -265,7 +265,7 @@ static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
 		return NULL;
 	}
 
-	as->count = alloc_page(GFP_KERNEL);
+	as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
 	if (!as->count) {
 		__free_page(as->pd);
 		kfree(as);
@@ -274,7 +274,7 @@ static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
 
 	as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
 	if (!as->pts) {
-		__free_page(as->count);
+		kfree(as->count);
 		__free_page(as->pd);
 		kfree(as);
 		return NULL;
@@ -287,13 +287,6 @@ static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
 	for (i = 0; i < SMMU_NUM_PDE; i++)
 		pd[i] = 0;
 
-	/* clear PDE usage counters */
-	pd = page_address(as->count);
-	SetPageReserved(as->count);
-
-	for (i = 0; i < SMMU_NUM_PDE; i++)
-		pd[i] = 0;
-
 	/* setup aperture */
 	as->domain.geometry.aperture_start = 0;
 	as->domain.geometry.aperture_end = 0xffffffff;
@@ -509,7 +502,7 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
 static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
 		       struct page **pagep)
 {
-	u32 *pd = page_address(as->pd), *pt, *count;
+	u32 *pd = page_address(as->pd), *pt;
 	unsigned int pde = iova_pd_index(iova);
 	struct tegra_smmu *smmu = as->smmu;
 	struct page *page;
@@ -545,9 +538,8 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
 	pt = page_address(page);
 
 	/* Keep track of entries in this page table. */
-	count = page_address(as->count);
 	if (pt[iova_pt_index(iova)] == 0)
-		count[pde]++;
+		as->count[pde]++;
 
 	return tegra_smmu_pte_offset(page, iova);
 }
@@ -556,7 +548,6 @@ static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
 {
 	struct tegra_smmu *smmu = as->smmu;
 	unsigned int pde = iova_pd_index(iova);
-	u32 *count = page_address(as->count);
 	u32 *pd = page_address(as->pd);
 	struct page *page = as->pts[pde];
 
@@ -564,7 +555,7 @@ static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
 	 * When no entries in this page table are used anymore, return the
 	 * memory page to the system.
 	 */
-	if (--count[pde] == 0) {
+	if (--as->count[pde] == 0) {
 		unsigned int offset = pde * sizeof(*pd);
 
 		/* Clear the page directory entry first */