diff options
author | Russell King <rmk+kernel@arm.linux.org.uk> | 2015-07-27 08:29:52 -0400 |
---|---|---|
committer | Thierry Reding <treding@nvidia.com> | 2015-08-13 10:06:42 -0400 |
commit | e3c971960fd41fc55235ba05b95e053355cb0e73 (patch) | |
tree | 43428c3ef79e66c1b022eefa5056ecdf42019b56 /drivers/iommu/tegra-smmu.c | |
parent | d62c7a886c2bc9f9258164814245dc0678b9a52e (diff) |
iommu/tegra-smmu: Convert to use DMA API
Use the DMA API instead of calling architecture internal functions in
the Tegra SMMU driver.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: Thierry Reding <treding@nvidia.com>
Diffstat (limited to 'drivers/iommu/tegra-smmu.c')
-rw-r--r-- | drivers/iommu/tegra-smmu.c | 139 |
1 file changed, 85 insertions(+), 54 deletions(-)
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index f420d8718535..43b69c8cbe46 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c | |||
@@ -16,8 +16,6 @@ | |||
16 | #include <linux/platform_device.h> | 16 | #include <linux/platform_device.h> |
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | 18 | ||
19 | #include <asm/cacheflush.h> | ||
20 | |||
21 | #include <soc/tegra/ahb.h> | 19 | #include <soc/tegra/ahb.h> |
22 | #include <soc/tegra/mc.h> | 20 | #include <soc/tegra/mc.h> |
23 | 21 | ||
@@ -45,6 +43,7 @@ struct tegra_smmu_as { | |||
45 | u32 *count; | 43 | u32 *count; |
46 | struct page **pts; | 44 | struct page **pts; |
47 | struct page *pd; | 45 | struct page *pd; |
46 | dma_addr_t pd_dma; | ||
48 | unsigned id; | 47 | unsigned id; |
49 | u32 attr; | 48 | u32 attr; |
50 | }; | 49 | }; |
@@ -82,9 +81,9 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset) | |||
82 | #define SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f) | 81 | #define SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f) |
83 | 82 | ||
84 | #define SMMU_PTB_DATA 0x020 | 83 | #define SMMU_PTB_DATA 0x020 |
85 | #define SMMU_PTB_DATA_VALUE(page, attr) (page_to_phys(page) >> 12 | (attr)) | 84 | #define SMMU_PTB_DATA_VALUE(dma, attr) ((dma) >> 12 | (attr)) |
86 | 85 | ||
87 | #define SMMU_MK_PDE(page, attr) (page_to_phys(page) >> SMMU_PTE_SHIFT | (attr)) | 86 | #define SMMU_MK_PDE(dma, attr) ((dma) >> SMMU_PTE_SHIFT | (attr)) |
88 | 87 | ||
89 | #define SMMU_TLB_FLUSH 0x030 | 88 | #define SMMU_TLB_FLUSH 0x030 |
90 | #define SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0) | 89 | #define SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0) |
@@ -147,22 +146,15 @@ static unsigned int iova_pt_index(unsigned long iova) | |||
147 | return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1); | 146 | return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1); |
148 | } | 147 | } |
149 | 148 | ||
150 | static void smmu_flush_dcache(struct page *page, unsigned long offset, | 149 | static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr) |
151 | size_t size) | ||
152 | { | 150 | { |
153 | #ifdef CONFIG_ARM | 151 | addr >>= 12; |
154 | phys_addr_t phys = page_to_phys(page) + offset; | 152 | return (addr & smmu->pfn_mask) == addr; |
155 | #endif | 153 | } |
156 | void *virt = page_address(page) + offset; | ||
157 | |||
158 | #ifdef CONFIG_ARM | ||
159 | __cpuc_flush_dcache_area(virt, size); | ||
160 | outer_flush_range(phys, phys + size); | ||
161 | #endif | ||
162 | 154 | ||
163 | #ifdef CONFIG_ARM64 | 155 | static dma_addr_t smmu_pde_to_dma(u32 pde) |
164 | __flush_dcache_area(virt, size); | 156 | { |
165 | #endif | 157 | return pde << 12; |
166 | } | 158 | } |
167 | 159 | ||
168 | static void smmu_flush_ptc_all(struct tegra_smmu *smmu) | 160 | static void smmu_flush_ptc_all(struct tegra_smmu *smmu) |
@@ -170,7 +162,7 @@ static void smmu_flush_ptc_all(struct tegra_smmu *smmu) | |||
170 | smmu_writel(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH); | 162 | smmu_writel(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH); |
171 | } | 163 | } |
172 | 164 | ||
173 | static inline void smmu_flush_ptc(struct tegra_smmu *smmu, phys_addr_t phys, | 165 | static inline void smmu_flush_ptc(struct tegra_smmu *smmu, dma_addr_t dma, |
174 | unsigned long offset) | 166 | unsigned long offset) |
175 | { | 167 | { |
176 | u32 value; | 168 | u32 value; |
@@ -178,15 +170,15 @@ static inline void smmu_flush_ptc(struct tegra_smmu *smmu, phys_addr_t phys, | |||
178 | offset &= ~(smmu->mc->soc->atom_size - 1); | 170 | offset &= ~(smmu->mc->soc->atom_size - 1); |
179 | 171 | ||
180 | if (smmu->mc->soc->num_address_bits > 32) { | 172 | if (smmu->mc->soc->num_address_bits > 32) { |
181 | #ifdef CONFIG_PHYS_ADDR_T_64BIT | 173 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
182 | value = (phys >> 32) & SMMU_PTC_FLUSH_HI_MASK; | 174 | value = (dma >> 32) & SMMU_PTC_FLUSH_HI_MASK; |
183 | #else | 175 | #else |
184 | value = 0; | 176 | value = 0; |
185 | #endif | 177 | #endif |
186 | smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI); | 178 | smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI); |
187 | } | 179 | } |
188 | 180 | ||
189 | value = (phys + offset) | SMMU_PTC_FLUSH_TYPE_ADR; | 181 | value = (dma + offset) | SMMU_PTC_FLUSH_TYPE_ADR; |
190 | smmu_writel(smmu, value, SMMU_PTC_FLUSH); | 182 | smmu_writel(smmu, value, SMMU_PTC_FLUSH); |
191 | } | 183 | } |
192 | 184 | ||
@@ -407,16 +399,26 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu, | |||
407 | return 0; | 399 | return 0; |
408 | } | 400 | } |
409 | 401 | ||
402 | as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD, | ||
403 | DMA_TO_DEVICE); | ||
404 | if (dma_mapping_error(smmu->dev, as->pd_dma)) | ||
405 | return -ENOMEM; | ||
406 | |||
407 | /* We can't handle 64-bit DMA addresses */ | ||
408 | if (!smmu_dma_addr_valid(smmu, as->pd_dma)) { | ||
409 | err = -ENOMEM; | ||
410 | goto err_unmap; | ||
411 | } | ||
412 | |||
410 | err = tegra_smmu_alloc_asid(smmu, &as->id); | 413 | err = tegra_smmu_alloc_asid(smmu, &as->id); |
411 | if (err < 0) | 414 | if (err < 0) |
412 | return err; | 415 | goto err_unmap; |
413 | 416 | ||
414 | smmu_flush_dcache(as->pd, 0, SMMU_SIZE_PD); | 417 | smmu_flush_ptc(smmu, as->pd_dma, 0); |
415 | smmu_flush_ptc(smmu, page_to_phys(as->pd), 0); | ||
416 | smmu_flush_tlb_asid(smmu, as->id); | 418 | smmu_flush_tlb_asid(smmu, as->id); |
417 | 419 | ||
418 | smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID); | 420 | smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID); |
419 | value = SMMU_PTB_DATA_VALUE(as->pd, as->attr); | 421 | value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr); |
420 | smmu_writel(smmu, value, SMMU_PTB_DATA); | 422 | smmu_writel(smmu, value, SMMU_PTB_DATA); |
421 | smmu_flush(smmu); | 423 | smmu_flush(smmu); |
422 | 424 | ||
@@ -424,6 +426,10 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu, | |||
424 | as->use_count++; | 426 | as->use_count++; |
425 | 427 | ||
426 | return 0; | 428 | return 0; |
429 | |||
430 | err_unmap: | ||
431 | dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE); | ||
432 | return err; | ||
427 | } | 433 | } |
428 | 434 | ||
429 | static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu, | 435 | static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu, |
@@ -433,6 +439,9 @@ static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu, | |||
433 | return; | 439 | return; |
434 | 440 | ||
435 | tegra_smmu_free_asid(smmu, as->id); | 441 | tegra_smmu_free_asid(smmu, as->id); |
442 | |||
443 | dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE); | ||
444 | |||
436 | as->smmu = NULL; | 445 | as->smmu = NULL; |
437 | } | 446 | } |
438 | 447 | ||
@@ -504,63 +513,81 @@ static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova) | |||
504 | } | 513 | } |
505 | 514 | ||
506 | static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova, | 515 | static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova, |
507 | struct page **pagep) | 516 | dma_addr_t *dmap) |
508 | { | 517 | { |
509 | unsigned int pd_index = iova_pd_index(iova); | 518 | unsigned int pd_index = iova_pd_index(iova); |
510 | struct page *pt_page; | 519 | struct page *pt_page; |
520 | u32 *pd; | ||
511 | 521 | ||
512 | pt_page = as->pts[pd_index]; | 522 | pt_page = as->pts[pd_index]; |
513 | if (!pt_page) | 523 | if (!pt_page) |
514 | return NULL; | 524 | return NULL; |
515 | 525 | ||
516 | *pagep = pt_page; | 526 | pd = page_address(as->pd); |
527 | *dmap = smmu_pde_to_dma(pd[pd_index]); | ||
517 | 528 | ||
518 | return tegra_smmu_pte_offset(pt_page, iova); | 529 | return tegra_smmu_pte_offset(pt_page, iova); |
519 | } | 530 | } |
520 | 531 | ||
521 | static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova, | 532 | static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova, |
522 | struct page **pagep) | 533 | dma_addr_t *dmap) |
523 | { | 534 | { |
524 | u32 *pd = page_address(as->pd), *pt; | 535 | u32 *pd = page_address(as->pd), *pt; |
525 | unsigned int pde = iova_pd_index(iova); | 536 | unsigned int pde = iova_pd_index(iova); |
526 | struct tegra_smmu *smmu = as->smmu; | 537 | struct tegra_smmu *smmu = as->smmu; |
527 | struct page *page; | ||
528 | unsigned int i; | 538 | unsigned int i; |
529 | 539 | ||
530 | if (!as->pts[pde]) { | 540 | if (!as->pts[pde]) { |
541 | struct page *page; | ||
542 | dma_addr_t dma; | ||
543 | |||
531 | page = alloc_page(GFP_KERNEL | __GFP_DMA); | 544 | page = alloc_page(GFP_KERNEL | __GFP_DMA); |
532 | if (!page) | 545 | if (!page) |
533 | return NULL; | 546 | return NULL; |
534 | 547 | ||
535 | pt = page_address(page); | 548 | pt = page_address(page); |
536 | SetPageReserved(page); | ||
537 | 549 | ||
538 | for (i = 0; i < SMMU_NUM_PTE; i++) | 550 | for (i = 0; i < SMMU_NUM_PTE; i++) |
539 | pt[i] = 0; | 551 | pt[i] = 0; |
540 | 552 | ||
553 | dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT, | ||
554 | DMA_TO_DEVICE); | ||
555 | if (dma_mapping_error(smmu->dev, dma)) { | ||
556 | __free_page(page); | ||
557 | return NULL; | ||
558 | } | ||
559 | |||
560 | if (!smmu_dma_addr_valid(smmu, dma)) { | ||
561 | dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT, | ||
562 | DMA_TO_DEVICE); | ||
563 | __free_page(page); | ||
564 | return NULL; | ||
565 | } | ||
566 | |||
541 | as->pts[pde] = page; | 567 | as->pts[pde] = page; |
542 | 568 | ||
543 | smmu_flush_dcache(page, 0, SMMU_SIZE_PT); | 569 | SetPageReserved(page); |
544 | 570 | ||
545 | pd[pde] = SMMU_MK_PDE(page, SMMU_PDE_ATTR | SMMU_PDE_NEXT); | 571 | pd[pde] = SMMU_MK_PDE(dma, SMMU_PDE_ATTR | SMMU_PDE_NEXT); |
546 | 572 | ||
547 | smmu_flush_dcache(as->pd, pde << 2, 4); | 573 | dma_sync_single_range_for_device(smmu->dev, as->pd_dma, |
548 | smmu_flush_ptc(smmu, page_to_phys(as->pd), pde << 2); | 574 | pde << 2, 4, DMA_TO_DEVICE); |
575 | smmu_flush_ptc(smmu, as->pd_dma, pde << 2); | ||
549 | smmu_flush_tlb_section(smmu, as->id, iova); | 576 | smmu_flush_tlb_section(smmu, as->id, iova); |
550 | smmu_flush(smmu); | 577 | smmu_flush(smmu); |
578 | |||
579 | *dmap = dma; | ||
551 | } else { | 580 | } else { |
552 | page = as->pts[pde]; | 581 | *dmap = smmu_pde_to_dma(pd[pde]); |
553 | } | 582 | } |
554 | 583 | ||
555 | *pagep = page; | 584 | pt = tegra_smmu_pte_offset(as->pts[pde], iova); |
556 | |||
557 | pt = page_address(page); | ||
558 | 585 | ||
559 | /* Keep track of entries in this page table. */ | 586 | /* Keep track of entries in this page table. */ |
560 | if (pt[iova_pt_index(iova)] == 0) | 587 | if (*pt == 0) |
561 | as->count[pde]++; | 588 | as->count[pde]++; |
562 | 589 | ||
563 | return tegra_smmu_pte_offset(page, iova); | 590 | return pt; |
564 | } | 591 | } |
565 | 592 | ||
566 | static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova) | 593 | static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova) |
@@ -576,17 +603,20 @@ static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova) | |||
576 | */ | 603 | */ |
577 | if (--as->count[pde] == 0) { | 604 | if (--as->count[pde] == 0) { |
578 | unsigned int offset = pde * sizeof(*pd); | 605 | unsigned int offset = pde * sizeof(*pd); |
606 | dma_addr_t pte_dma = smmu_pde_to_dma(pd[pde]); | ||
579 | 607 | ||
580 | /* Clear the page directory entry first */ | 608 | /* Clear the page directory entry first */ |
581 | pd[pde] = 0; | 609 | pd[pde] = 0; |
582 | 610 | ||
583 | /* Flush the page directory entry */ | 611 | /* Flush the page directory entry */ |
584 | smmu_flush_dcache(as->pd, offset, sizeof(*pd)); | 612 | dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset, |
585 | smmu_flush_ptc(smmu, page_to_phys(as->pd), offset); | 613 | sizeof(*pd), DMA_TO_DEVICE); |
614 | smmu_flush_ptc(smmu, as->pd_dma, offset); | ||
586 | smmu_flush_tlb_section(smmu, as->id, iova); | 615 | smmu_flush_tlb_section(smmu, as->id, iova); |
587 | smmu_flush(smmu); | 616 | smmu_flush(smmu); |
588 | 617 | ||
589 | /* Finally, free the page */ | 618 | /* Finally, free the page */ |
619 | dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE); | ||
590 | ClearPageReserved(page); | 620 | ClearPageReserved(page); |
591 | __free_page(page); | 621 | __free_page(page); |
592 | as->pts[pde] = NULL; | 622 | as->pts[pde] = NULL; |
@@ -594,15 +624,16 @@ static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova) | |||
594 | } | 624 | } |
595 | 625 | ||
596 | static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova, | 626 | static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova, |
597 | u32 *pte, struct page *pte_page, u32 val) | 627 | u32 *pte, dma_addr_t pte_dma, u32 val) |
598 | { | 628 | { |
599 | struct tegra_smmu *smmu = as->smmu; | 629 | struct tegra_smmu *smmu = as->smmu; |
600 | unsigned long offset = offset_in_page(pte); | 630 | unsigned long offset = offset_in_page(pte); |
601 | 631 | ||
602 | *pte = val; | 632 | *pte = val; |
603 | 633 | ||
604 | smmu_flush_dcache(pte_page, offset, 4); | 634 | dma_sync_single_range_for_device(smmu->dev, pte_dma, offset, |
605 | smmu_flush_ptc(smmu, page_to_phys(pte_page), offset); | 635 | 4, DMA_TO_DEVICE); |
636 | smmu_flush_ptc(smmu, pte_dma, offset); | ||
606 | smmu_flush_tlb_group(smmu, as->id, iova); | 637 | smmu_flush_tlb_group(smmu, as->id, iova); |
607 | smmu_flush(smmu); | 638 | smmu_flush(smmu); |
608 | } | 639 | } |
@@ -611,14 +642,14 @@ static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova, | |||
611 | phys_addr_t paddr, size_t size, int prot) | 642 | phys_addr_t paddr, size_t size, int prot) |
612 | { | 643 | { |
613 | struct tegra_smmu_as *as = to_smmu_as(domain); | 644 | struct tegra_smmu_as *as = to_smmu_as(domain); |
614 | struct page *page; | 645 | dma_addr_t pte_dma; |
615 | u32 *pte; | 646 | u32 *pte; |
616 | 647 | ||
617 | pte = as_get_pte(as, iova, &page); | 648 | pte = as_get_pte(as, iova, &pte_dma); |
618 | if (!pte) | 649 | if (!pte) |
619 | return -ENOMEM; | 650 | return -ENOMEM; |
620 | 651 | ||
621 | tegra_smmu_set_pte(as, iova, pte, page, | 652 | tegra_smmu_set_pte(as, iova, pte, pte_dma, |
622 | __phys_to_pfn(paddr) | SMMU_PTE_ATTR); | 653 | __phys_to_pfn(paddr) | SMMU_PTE_ATTR); |
623 | 654 | ||
624 | return 0; | 655 | return 0; |
@@ -628,14 +659,14 @@ static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova, | |||
628 | size_t size) | 659 | size_t size) |
629 | { | 660 | { |
630 | struct tegra_smmu_as *as = to_smmu_as(domain); | 661 | struct tegra_smmu_as *as = to_smmu_as(domain); |
631 | struct page *pte_page; | 662 | dma_addr_t pte_dma; |
632 | u32 *pte; | 663 | u32 *pte; |
633 | 664 | ||
634 | pte = tegra_smmu_pte_lookup(as, iova, &pte_page); | 665 | pte = tegra_smmu_pte_lookup(as, iova, &pte_dma); |
635 | if (!pte || !*pte) | 666 | if (!pte || !*pte) |
636 | return 0; | 667 | return 0; |
637 | 668 | ||
638 | tegra_smmu_set_pte(as, iova, pte, pte_page, 0); | 669 | tegra_smmu_set_pte(as, iova, pte, pte_dma, 0); |
639 | tegra_smmu_pte_put_use(as, iova); | 670 | tegra_smmu_pte_put_use(as, iova); |
640 | 671 | ||
641 | return size; | 672 | return size; |
@@ -645,11 +676,11 @@ static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain, | |||
645 | dma_addr_t iova) | 676 | dma_addr_t iova) |
646 | { | 677 | { |
647 | struct tegra_smmu_as *as = to_smmu_as(domain); | 678 | struct tegra_smmu_as *as = to_smmu_as(domain); |
648 | struct page *pte_page; | ||
649 | unsigned long pfn; | 679 | unsigned long pfn; |
680 | dma_addr_t pte_dma; | ||
650 | u32 *pte; | 681 | u32 *pte; |
651 | 682 | ||
652 | pte = tegra_smmu_pte_lookup(as, iova, &pte_page); | 683 | pte = tegra_smmu_pte_lookup(as, iova, &pte_dma); |
653 | if (!pte || !*pte) | 684 | if (!pte || !*pte) |
654 | return 0; | 685 | return 0; |
655 | 686 | ||