 drivers/iommu/Kconfig      |   1 +
 drivers/iommu/ipmmu-vmsa.c | 554 ++++--------------------------------------
 2 files changed, 61 insertions(+), 494 deletions(-)
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 87060ad6829d..b7c656c84f51 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -313,6 +313,7 @@ config IPMMU_VMSA
 	depends on ARM_LPAE
 	depends on ARCH_SHMOBILE || COMPILE_TEST
 	select IOMMU_API
+	select IOMMU_IO_PGTABLE_LPAE
 	select ARM_DMA_USE_IOMMU
 	help
 	  Support for the Renesas VMSA-compatible IPMMU Renesas found in the
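
The new select pulls in the shared ARM LPAE page table allocator that the rest of this patch builds on. For reference, these are the allocator entry points the driver switches to below, as introduced by the io-pgtable series (a sketch for orientation, not a verbatim copy of the header):

/* From "io-pgtable.h" (sketch): allocates page table operations for the
 * requested format; hardware configuration (TTBR, MAIR, ...) is returned
 * through *cfg, and the cookie is passed back to the TLB/cache callbacks. */
struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
					    struct io_pgtable_cfg *cfg,
					    void *cookie);

/* Tears down the operations and frees any page tables they still hold. */
void free_io_pgtable_ops(struct io_pgtable_ops *ops);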
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 791c3daec7c0..3d7e7325a1e5 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -24,6 +24,8 @@
 #include <asm/dma-iommu.h>
 #include <asm/pgalloc.h>
 
+#include "io-pgtable.h"
+
 struct ipmmu_vmsa_device {
 	struct device *dev;
 	void __iomem *base;
@@ -38,9 +40,11 @@ struct ipmmu_vmsa_domain {
 	struct ipmmu_vmsa_device *mmu;
 	struct iommu_domain *io_domain;
 
+	struct io_pgtable_cfg cfg;
+	struct io_pgtable_ops *iop;
+
 	unsigned int context_id;
 	spinlock_t lock;			/* Protects mappings */
-	pgd_t *pgd;
 };
 
 struct ipmmu_vmsa_archdata {
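
The two new fields tie a domain to the shared allocator: cfg holds the configuration handed to (and partly filled in by) the allocator, and iop is the resulting operations handle that replaces the hand-rolled pgd. A trimmed sketch of the operations structure the driver calls through, per the io-pgtable series of the time (illustrative, not the authoritative header):

/* Sketch of struct io_pgtable_ops: the three callbacks mirror the
 * iommu_ops map/unmap/iova_to_phys entry points one-to-one. */
struct io_pgtable_ops {
	int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot);
	int (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
		     size_t size);
	phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
				    unsigned long iova);
};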
@@ -173,52 +177,6 @@ static LIST_HEAD(ipmmu_devices);
 #define IMUASID_ASID0_SHIFT		0
 
 /* -----------------------------------------------------------------------------
- * Page Table Bits
- */
-
-/*
- * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory access,
- * Long-descriptor format" that the NStable bit being set in a table descriptor
- * will result in the NStable and NS bits of all child entries being ignored and
- * considered as being set. The IPMMU seems not to comply with this, as it
- * generates a secure access page fault if any of the NStable and NS bits isn't
- * set when running in non-secure mode.
- */
-#ifndef PMD_NSTABLE
-#define PMD_NSTABLE			(_AT(pmdval_t, 1) << 63)
-#endif
-
-#define ARM_VMSA_PTE_XN			(((pteval_t)3) << 53)
-#define ARM_VMSA_PTE_CONT		(((pteval_t)1) << 52)
-#define ARM_VMSA_PTE_AF			(((pteval_t)1) << 10)
-#define ARM_VMSA_PTE_SH_NS		(((pteval_t)0) << 8)
-#define ARM_VMSA_PTE_SH_OS		(((pteval_t)2) << 8)
-#define ARM_VMSA_PTE_SH_IS		(((pteval_t)3) << 8)
-#define ARM_VMSA_PTE_SH_MASK		(((pteval_t)3) << 8)
-#define ARM_VMSA_PTE_NS			(((pteval_t)1) << 5)
-#define ARM_VMSA_PTE_PAGE		(((pteval_t)3) << 0)
-
-/* Stage-1 PTE */
-#define ARM_VMSA_PTE_nG			(((pteval_t)1) << 11)
-#define ARM_VMSA_PTE_AP_UNPRIV		(((pteval_t)1) << 6)
-#define ARM_VMSA_PTE_AP_RDONLY		(((pteval_t)2) << 6)
-#define ARM_VMSA_PTE_AP_MASK		(((pteval_t)3) << 6)
-#define ARM_VMSA_PTE_ATTRINDX_MASK	(((pteval_t)3) << 2)
-#define ARM_VMSA_PTE_ATTRINDX_SHIFT	2
-
-#define ARM_VMSA_PTE_ATTRS_MASK \
-	(ARM_VMSA_PTE_XN | ARM_VMSA_PTE_CONT | ARM_VMSA_PTE_nG | \
-	 ARM_VMSA_PTE_AF | ARM_VMSA_PTE_SH_MASK | ARM_VMSA_PTE_AP_MASK | \
-	 ARM_VMSA_PTE_NS | ARM_VMSA_PTE_ATTRINDX_MASK)
-
-#define ARM_VMSA_PTE_CONT_ENTRIES	16
-#define ARM_VMSA_PTE_CONT_SIZE		(PAGE_SIZE * ARM_VMSA_PTE_CONT_ENTRIES)
-
-#define IPMMU_PTRS_PER_PTE		512
-#define IPMMU_PTRS_PER_PMD		512
-#define IPMMU_PTRS_PER_PGD		4
-
-/* -----------------------------------------------------------------------------
  * Read/Write Access
  */
 
@@ -307,18 +265,39 @@ static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
 	ipmmu_write(mmu, IMUCTR(utlb), 0);
 }
 
-static void ipmmu_flush_pgtable(struct ipmmu_vmsa_device *mmu, void *addr,
-				size_t size)
+static void ipmmu_tlb_flush_all(void *cookie)
 {
-	unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
+	struct ipmmu_vmsa_domain *domain = cookie;
+
+	ipmmu_tlb_invalidate(domain);
+}
+
+static void ipmmu_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
+				void *cookie)
+{
+	/* The hardware doesn't support selective TLB flush. */
+}
+
+static void ipmmu_flush_pgtable(void *ptr, size_t size, void *cookie)
+{
+	unsigned long offset = (unsigned long)ptr & ~PAGE_MASK;
+	struct ipmmu_vmsa_domain *domain = cookie;
 
 	/*
 	 * TODO: Add support for coherent walk through CCI with DVM and remove
 	 * cache handling.
 	 */
-	dma_map_page(mmu->dev, virt_to_page(addr), offset, size, DMA_TO_DEVICE);
+	dma_map_page(domain->mmu->dev, virt_to_page(ptr), offset, size,
+		     DMA_TO_DEVICE);
 }
 
+static struct iommu_gather_ops ipmmu_gather_ops = {
+	.tlb_flush_all = ipmmu_tlb_flush_all,
+	.tlb_add_flush = ipmmu_tlb_add_flush,
+	.tlb_sync = ipmmu_tlb_flush_all,
+	.flush_pgtable = ipmmu_flush_pgtable,
+};
+
 /* -----------------------------------------------------------------------------
  * Domain/Context Management
  */
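
These callbacks form the contract between the shared allocator and the driver: the allocator updates descriptors in memory, then calls back so the driver can make the update visible to the device. Because the IPMMU has no selective TLB invalidation, tlb_add_flush is a no-op and tlb_sync simply performs a full flush. A hedged sketch of the order in which the allocator exercises these hooks on unmap (illustrative only; the example_ function is not allocator code):

/* Illustrative only: roughly how the LPAE backend tears down a leaf
 * entry using the cfg->tlb hooks registered above. */
static void example_clear_entry(struct io_pgtable_cfg *cfg, u64 *ptep,
				unsigned long iova, size_t size, void *cookie)
{
	*ptep = 0;					/* clear the descriptor */
	cfg->tlb->flush_pgtable(ptep, sizeof(*ptep), cookie); /* CPU cache -> memory */
	cfg->tlb->tlb_add_flush(iova, size, true, cookie); /* queue invalidation */
	cfg->tlb->tlb_sync(cookie);			/* wait for completion */
}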
@@ -326,7 +305,28 @@ static void ipmmu_flush_pgtable(struct ipmmu_vmsa_device *mmu, void *addr,
 static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
 {
 	phys_addr_t ttbr;
-	u32 reg;
+
+	/*
+	 * Allocate the page table operations.
+	 *
+	 * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
+	 * access, Long-descriptor format" that the NStable bit being set in a
+	 * table descriptor will result in the NStable and NS bits of all child
+	 * entries being ignored and considered as being set. The IPMMU seems
+	 * not to comply with this, as it generates a secure access page fault
+	 * if any of the NStable and NS bits isn't set when running in
+	 * non-secure mode.
+	 */
+	domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
+	domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
+	domain->cfg.ias = 32;
+	domain->cfg.oas = 40;
+	domain->cfg.tlb = &ipmmu_gather_ops;
+
+	domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
+					   domain);
+	if (!domain->iop)
+		return -EINVAL;
 
 	/*
 	 * TODO: When adding support for multiple contexts, find an unused
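
The cfg fields set above are the allocator's inputs; once alloc_io_pgtable_ops() returns, the same structure also carries the allocator's outputs, which the following hunks program into the hardware. The 32-bit input and 40-bit output address sizes match the old code, which rejected physical addresses above 40 bits. A trimmed sketch of the structure per the io-pgtable series (illustrative; the stage-2 variant is omitted):

/* Sketch of struct io_pgtable_cfg: the first fields are inputs filled in
 * by the driver, arm_lpae_s1_cfg holds outputs computed by the allocator
 * (the TTBR and MAIR values programmed below). */
struct io_pgtable_cfg {
	int			quirks;		/* e.g. IO_PGTABLE_QUIRK_ARM_NS */
	unsigned long		pgsize_bitmap;	/* supported mapping sizes */
	unsigned int		ias;		/* input (IOVA) address bits */
	unsigned int		oas;		/* output (PA) address bits */
	const struct iommu_gather_ops *tlb;	/* maintenance callbacks */

	struct {
		u64	ttbr[2];
		u64	tcr;
		u64	mair[2];
	} arm_lpae_s1_cfg;
};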
@@ -335,9 +335,7 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
 	domain->context_id = 0;
 
 	/* TTBR0 */
-	ipmmu_flush_pgtable(domain->mmu, domain->pgd,
-			    IPMMU_PTRS_PER_PGD * sizeof(*domain->pgd));
-	ttbr = __pa(domain->pgd);
+	ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
 	ipmmu_ctx_write(domain, IMTTLBR0, ttbr);
 	ipmmu_ctx_write(domain, IMTTUBR0, ttbr >> 32);
 
@@ -350,15 +348,8 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
 	       IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
 	       IMTTBCR_IRGN0_WB_WA | IMTTBCR_SL0_LVL_1);
 
-	/*
-	 * MAIR0
-	 * We need three attributes only, non-cacheable, write-back read/write
-	 * allocate and device memory.
-	 */
-	reg = (IMMAIR_ATTR_NC << IMMAIR_ATTR_SHIFT(IMMAIR_ATTR_IDX_NC))
-	    | (IMMAIR_ATTR_WBRWA << IMMAIR_ATTR_SHIFT(IMMAIR_ATTR_IDX_WBRWA))
-	    | (IMMAIR_ATTR_DEVICE << IMMAIR_ATTR_SHIFT(IMMAIR_ATTR_IDX_DEV));
-	ipmmu_ctx_write(domain, IMMAIR0, reg);
+	/* MAIR0 */
+	ipmmu_ctx_write(domain, IMMAIR0, domain->cfg.arm_lpae_s1_cfg.mair[0]);
 
 	/* IMBUSCR */
 	ipmmu_ctx_write(domain, IMBUSCR,
@@ -463,397 +454,6 @@ static irqreturn_t ipmmu_irq(int irq, void *dev)
 }
 
 /* -----------------------------------------------------------------------------
- * Page Table Management
- */
-
-#define pud_pgtable(pud) pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))
-
-static void ipmmu_free_ptes(pmd_t *pmd)
-{
-	pgtable_t table = pmd_pgtable(*pmd);
-	__free_page(table);
-}
-
-static void ipmmu_free_pmds(pud_t *pud)
-{
-	pmd_t *pmd = pmd_offset(pud, 0);
-	pgtable_t table;
-	unsigned int i;
-
-	for (i = 0; i < IPMMU_PTRS_PER_PMD; ++i) {
-		if (!pmd_table(*pmd))
-			continue;
-
-		ipmmu_free_ptes(pmd);
-		pmd++;
-	}
-
-	table = pud_pgtable(*pud);
-	__free_page(table);
-}
-
-static void ipmmu_free_pgtables(struct ipmmu_vmsa_domain *domain)
-{
-	pgd_t *pgd, *pgd_base = domain->pgd;
-	unsigned int i;
-
-	/*
-	 * Recursively free the page tables for this domain. We don't care about
-	 * speculative TLB filling, because the TLB will be nuked next time this
-	 * context bank is re-allocated and no devices currently map to these
-	 * tables.
-	 */
-	pgd = pgd_base;
-	for (i = 0; i < IPMMU_PTRS_PER_PGD; ++i) {
-		if (pgd_none(*pgd))
-			continue;
-		ipmmu_free_pmds((pud_t *)pgd);
-		pgd++;
-	}
-
-	kfree(pgd_base);
-}
-
-/*
- * We can't use the (pgd|pud|pmd|pte)_populate or the set_(pgd|pud|pmd|pte)
- * functions as they would flush the CPU TLB.
- */
-
-static pte_t *ipmmu_alloc_pte(struct ipmmu_vmsa_device *mmu, pmd_t *pmd,
-			      unsigned long iova)
-{
-	pte_t *pte;
-
-	if (!pmd_none(*pmd))
-		return pte_offset_kernel(pmd, iova);
-
-	pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);
-	if (!pte)
-		return NULL;
-
-	ipmmu_flush_pgtable(mmu, pte, PAGE_SIZE);
-	*pmd = __pmd(__pa(pte) | PMD_NSTABLE | PMD_TYPE_TABLE);
-	ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd));
-
-	return pte + pte_index(iova);
-}
-
-static pmd_t *ipmmu_alloc_pmd(struct ipmmu_vmsa_device *mmu, pgd_t *pgd,
-			      unsigned long iova)
-{
-	pud_t *pud = (pud_t *)pgd;
-	pmd_t *pmd;
-
-	if (!pud_none(*pud))
-		return pmd_offset(pud, iova);
-
-	pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
-	if (!pmd)
-		return NULL;
-
-	ipmmu_flush_pgtable(mmu, pmd, PAGE_SIZE);
-	*pud = __pud(__pa(pmd) | PMD_NSTABLE | PMD_TYPE_TABLE);
-	ipmmu_flush_pgtable(mmu, pud, sizeof(*pud));
-
-	return pmd + pmd_index(iova);
-}
-
-static u64 ipmmu_page_prot(unsigned int prot, u64 type)
-{
-	u64 pgprot = ARM_VMSA_PTE_nG | ARM_VMSA_PTE_AF
-		   | ARM_VMSA_PTE_SH_IS | ARM_VMSA_PTE_AP_UNPRIV
-		   | ARM_VMSA_PTE_NS | type;
-
-	if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
-		pgprot |= ARM_VMSA_PTE_AP_RDONLY;
-
-	if (prot & IOMMU_CACHE)
-		pgprot |= IMMAIR_ATTR_IDX_WBRWA << ARM_VMSA_PTE_ATTRINDX_SHIFT;
-
-	if (prot & IOMMU_NOEXEC)
-		pgprot |= ARM_VMSA_PTE_XN;
-	else if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
-		/* If no access create a faulting entry to avoid TLB fills. */
-		pgprot &= ~ARM_VMSA_PTE_PAGE;
-
-	return pgprot;
-}
-
-static int ipmmu_alloc_init_pte(struct ipmmu_vmsa_device *mmu, pmd_t *pmd,
-				unsigned long iova, unsigned long pfn,
-				size_t size, int prot)
-{
-	pteval_t pteval = ipmmu_page_prot(prot, ARM_VMSA_PTE_PAGE);
-	unsigned int num_ptes = 1;
-	pte_t *pte, *start;
-	unsigned int i;
-
-	pte = ipmmu_alloc_pte(mmu, pmd, iova);
-	if (!pte)
-		return -ENOMEM;
-
-	start = pte;
-
-	/*
-	 * Install the page table entries. We can be called both for a single
-	 * page or for a block of 16 physically contiguous pages. In the latter
-	 * case set the PTE contiguous hint.
-	 */
-	if (size == SZ_64K) {
-		pteval |= ARM_VMSA_PTE_CONT;
-		num_ptes = ARM_VMSA_PTE_CONT_ENTRIES;
-	}
-
-	for (i = num_ptes; i; --i)
-		*pte++ = pfn_pte(pfn++, __pgprot(pteval));
-
-	ipmmu_flush_pgtable(mmu, start, sizeof(*pte) * num_ptes);
-
-	return 0;
-}
-
-static int ipmmu_alloc_init_pmd(struct ipmmu_vmsa_device *mmu, pmd_t *pmd,
-				unsigned long iova, unsigned long pfn,
-				int prot)
-{
-	pmdval_t pmdval = ipmmu_page_prot(prot, PMD_TYPE_SECT);
-
-	*pmd = pfn_pmd(pfn, __pgprot(pmdval));
-	ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd));
-
-	return 0;
-}
-
-static int ipmmu_create_mapping(struct ipmmu_vmsa_domain *domain,
-				unsigned long iova, phys_addr_t paddr,
-				size_t size, int prot)
-{
-	struct ipmmu_vmsa_device *mmu = domain->mmu;
-	pgd_t *pgd = domain->pgd;
-	unsigned long flags;
-	unsigned long pfn;
-	pmd_t *pmd;
-	int ret;
-
-	if (!pgd)
-		return -EINVAL;
-
-	if (size & ~PAGE_MASK)
-		return -EINVAL;
-
-	if (paddr & ~((1ULL << 40) - 1))
-		return -ERANGE;
-
-	pfn = __phys_to_pfn(paddr);
-	pgd += pgd_index(iova);
-
-	/* Update the page tables. */
-	spin_lock_irqsave(&domain->lock, flags);
-
-	pmd = ipmmu_alloc_pmd(mmu, pgd, iova);
-	if (!pmd) {
-		ret = -ENOMEM;
-		goto done;
-	}
-
-	switch (size) {
-	case SZ_2M:
-		ret = ipmmu_alloc_init_pmd(mmu, pmd, iova, pfn, prot);
-		break;
-	case SZ_64K:
-	case SZ_4K:
-		ret = ipmmu_alloc_init_pte(mmu, pmd, iova, pfn, size, prot);
-		break;
-	default:
-		ret = -EINVAL;
-		break;
-	}
-
-done:
-	spin_unlock_irqrestore(&domain->lock, flags);
-
-	if (!ret)
-		ipmmu_tlb_invalidate(domain);
-
-	return ret;
-}
-
-static void ipmmu_clear_pud(struct ipmmu_vmsa_device *mmu, pud_t *pud)
-{
-	pgtable_t table = pud_pgtable(*pud);
-
-	/* Clear the PUD. */
-	*pud = __pud(0);
-	ipmmu_flush_pgtable(mmu, pud, sizeof(*pud));
-
-	/* Free the page table. */
-	__free_page(table);
-}
-
-static void ipmmu_clear_pmd(struct ipmmu_vmsa_device *mmu, pud_t *pud,
-			    pmd_t *pmd)
-{
-	pmd_t pmdval = *pmd;
-	unsigned int i;
-
-	/* Clear the PMD. */
-	*pmd = __pmd(0);
-	ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd));
-
-	/* Free the page table. */
-	if (pmd_table(pmdval)) {
-		pgtable_t table = pmd_pgtable(pmdval);
-
-		__free_page(table);
-	}
-
-	/* Check whether the PUD is still needed. */
-	pmd = pmd_offset(pud, 0);
-	for (i = 0; i < IPMMU_PTRS_PER_PMD; ++i) {
-		if (!pmd_none(pmd[i]))
-			return;
-	}
-
-	/* Clear the parent PUD. */
-	ipmmu_clear_pud(mmu, pud);
-}
-
-static void ipmmu_clear_pte(struct ipmmu_vmsa_device *mmu, pud_t *pud,
-			    pmd_t *pmd, pte_t *pte, unsigned int num_ptes)
-{
-	unsigned int i;
-
-	/* Clear the PTE. */
-	for (i = num_ptes; i; --i)
-		pte[i-1] = __pte(0);
-
-	ipmmu_flush_pgtable(mmu, pte, sizeof(*pte) * num_ptes);
-
-	/* Check whether the PMD is still needed. */
-	pte = pte_offset_kernel(pmd, 0);
-	for (i = 0; i < IPMMU_PTRS_PER_PTE; ++i) {
-		if (!pte_none(pte[i]))
-			return;
-	}
-
-	/* Clear the parent PMD. */
-	ipmmu_clear_pmd(mmu, pud, pmd);
-}
-
-static int ipmmu_split_pmd(struct ipmmu_vmsa_device *mmu, pmd_t *pmd)
-{
-	pte_t *pte, *start;
-	pteval_t pteval;
-	unsigned long pfn;
-	unsigned int i;
-
-	pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);
-	if (!pte)
-		return -ENOMEM;
-
-	/* Copy the PMD attributes. */
-	pteval = (pmd_val(*pmd) & ARM_VMSA_PTE_ATTRS_MASK)
-	       | ARM_VMSA_PTE_CONT | ARM_VMSA_PTE_PAGE;
-
-	pfn = pmd_pfn(*pmd);
-	start = pte;
-
-	for (i = IPMMU_PTRS_PER_PTE; i; --i)
-		*pte++ = pfn_pte(pfn++, __pgprot(pteval));
-
-	ipmmu_flush_pgtable(mmu, start, PAGE_SIZE);
-	*pmd = __pmd(__pa(start) | PMD_NSTABLE | PMD_TYPE_TABLE);
-	ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd));
-
-	return 0;
-}
-
-static void ipmmu_split_pte(struct ipmmu_vmsa_device *mmu, pte_t *pte)
-{
-	unsigned int i;
-
-	for (i = ARM_VMSA_PTE_CONT_ENTRIES; i; --i)
-		pte[i-1] = __pte(pte_val(*pte) & ~ARM_VMSA_PTE_CONT);
-
-	ipmmu_flush_pgtable(mmu, pte, sizeof(*pte) * ARM_VMSA_PTE_CONT_ENTRIES);
-}
-
-static int ipmmu_clear_mapping(struct ipmmu_vmsa_domain *domain,
-			       unsigned long iova, size_t size)
-{
-	struct ipmmu_vmsa_device *mmu = domain->mmu;
-	unsigned long flags;
-	pgd_t *pgd = domain->pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
-
-	if (!pgd)
-		return -EINVAL;
-
-	if (size & ~PAGE_MASK)
-		return -EINVAL;
-
-	pgd += pgd_index(iova);
-	pud = (pud_t *)pgd;
-
-	spin_lock_irqsave(&domain->lock, flags);
-
-	/* If there's no PUD or PMD we're done. */
-	if (pud_none(*pud))
-		goto done;
-
-	pmd = pmd_offset(pud, iova);
-	if (pmd_none(*pmd))
-		goto done;
-
-	/*
-	 * When freeing a 2MB block just clear the PMD. In the unlikely case the
-	 * block is mapped as individual pages this will free the corresponding
-	 * PTE page table.
-	 */
-	if (size == SZ_2M) {
-		ipmmu_clear_pmd(mmu, pud, pmd);
-		goto done;
-	}
-
-	/*
-	 * If the PMD has been mapped as a section remap it as pages to allow
-	 * freeing individual pages.
-	 */
-	if (pmd_sect(*pmd))
-		ipmmu_split_pmd(mmu, pmd);
-
-	pte = pte_offset_kernel(pmd, iova);
-
-	/*
-	 * When freeing a 64kB block just clear the PTE entries. We don't have
-	 * to care about the contiguous hint of the surrounding entries.
-	 */
-	if (size == SZ_64K) {
-		ipmmu_clear_pte(mmu, pud, pmd, pte, ARM_VMSA_PTE_CONT_ENTRIES);
-		goto done;
-	}
-
-	/*
-	 * If the PTE has been mapped with the contiguous hint set remap it and
-	 * its surrounding PTEs to allow unmapping a single page.
-	 */
-	if (pte_val(*pte) & ARM_VMSA_PTE_CONT)
-		ipmmu_split_pte(mmu, pte);
-
-	/* Clear the PTE. */
-	ipmmu_clear_pte(mmu, pud, pmd, pte, 1);
-
-done:
-	spin_unlock_irqrestore(&domain->lock, flags);
-
-	ipmmu_tlb_invalidate(domain);
-
-	return 0;
-}
-
-/* -----------------------------------------------------------------------------
  * IOMMU Operations
  */
 
@@ -867,12 +467,6 @@ static int ipmmu_domain_init(struct iommu_domain *io_domain)
 
 	spin_lock_init(&domain->lock);
 
-	domain->pgd = kzalloc(IPMMU_PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
-	if (!domain->pgd) {
-		kfree(domain);
-		return -ENOMEM;
-	}
-
 	io_domain->priv = domain;
 	domain->io_domain = io_domain;
 
@@ -888,7 +482,7 @@ static void ipmmu_domain_destroy(struct iommu_domain *io_domain)
 	 * been detached.
 	 */
 	ipmmu_domain_destroy_context(domain);
-	ipmmu_free_pgtables(domain);
+	free_io_pgtable_ops(domain->iop);
 	kfree(domain);
 }
 
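
free_io_pgtable_ops() also frees every page table still owned by the allocator, which is what makes the recursive ipmmu_free_pgtables() walk removed above redundant. A minimal lifecycle sketch under the same assumptions as the earlier examples (the example_ function is illustrative):

/* Sketch: the allocator's lifetime brackets the domain's lifetime. */
static int example_domain_lifetime(struct ipmmu_vmsa_domain *domain)
{
	domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
					   domain);
	if (!domain->iop)
		return -EINVAL;

	/* ... map and unmap through domain->iop ... */

	free_io_pgtable_ops(domain->iop);	/* also frees the tables */
	return 0;
}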
@@ -957,53 +551,25 @@ static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
 	if (!domain)
 		return -ENODEV;
 
-	return ipmmu_create_mapping(domain, iova, paddr, size, prot);
+	return domain->iop->map(domain->iop, iova, paddr, size, prot);
 }
 
 static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
 			  size_t size)
 {
 	struct ipmmu_vmsa_domain *domain = io_domain->priv;
-	int ret;
 
-	ret = ipmmu_clear_mapping(domain, iova, size);
-	return ret ? 0 : size;
+	return domain->iop->unmap(domain->iop, iova, size);
 }
 
 static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
 				      dma_addr_t iova)
 {
 	struct ipmmu_vmsa_domain *domain = io_domain->priv;
-	pgd_t pgd;
-	pud_t pud;
-	pmd_t pmd;
-	pte_t pte;
 
 	/* TODO: Is locking needed ? */
 
-	if (!domain->pgd)
-		return 0;
-
-	pgd = *(domain->pgd + pgd_index(iova));
-	if (pgd_none(pgd))
-		return 0;
-
-	pud = *pud_offset(&pgd, iova);
-	if (pud_none(pud))
-		return 0;
-
-	pmd = *pmd_offset(&pud, iova);
-	if (pmd_none(pmd))
-		return 0;
-
-	if (pmd_sect(pmd))
-		return __pfn_to_phys(pmd_pfn(pmd)) | (iova & ~PMD_MASK);
-
-	pte = *(pmd_page_vaddr(pmd) + pte_index(iova));
-	if (pte_none(pte))
-		return 0;
-
-	return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK);
+	return domain->iop->iova_to_phys(domain->iop, iova);
 }
 
 static int ipmmu_find_utlbs(struct ipmmu_vmsa_device *mmu, struct device *dev,
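
After this hunk the driver's mapping paths are thin shims: the IOMMU core checks each request against pgsize_bitmap and splits it, the driver forwards each chunk, and the allocator performs the descriptor writes plus cache/TLB maintenance through the gather ops. A small usage sketch from a caller's perspective (core API of the era; the example_ function is illustrative):

/* Sketch: mapping one 4K page through the core API ends up in
 * ipmmu_map() and from there in domain->iop->map(). */
static int example_map_page(struct iommu_domain *io_domain,
			    unsigned long iova, phys_addr_t paddr)
{
	return iommu_map(io_domain, iova, paddr, SZ_4K,
			 IOMMU_READ | IOMMU_WRITE);
}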
@@ -1188,7 +754,7 @@ static const struct iommu_ops ipmmu_ops = {
 	.iova_to_phys = ipmmu_iova_to_phys,
 	.add_device = ipmmu_add_device,
 	.remove_device = ipmmu_remove_device,
-	.pgsize_bitmap = SZ_2M | SZ_64K | SZ_4K,
+	.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
 };
 
 /* -----------------------------------------------------------------------------
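
The advertised page sizes change with the allocator: with a 4K granule, LPAE provides 4K pages plus 2M (level-2) and 1G (level-1) block mappings, while the 64K runs the old code built from 16 contiguous-hint PTEs are no longer offered. The bitmap is what the IOMMU core uses to split requests before calling the driver; a simplified sketch along the lines of the core's iommu_pgsize() helper (illustrative, not the exact core code):

/* Sketch: pick the largest advertised size that fits both the remaining
 * length and the iova/paddr alignment; a 6M request thus becomes three
 * 2M map() calls. */
static size_t example_pgsize(unsigned long pgsize_bitmap,
			     unsigned long addr_merge, size_t size)
{
	unsigned long mask = (1UL << (__fls(size) + 1)) - 1;	/* sizes <= size */

	if (addr_merge)		/* addr_merge = iova | paddr */
		mask &= (1UL << (__ffs(addr_merge) + 1)) - 1;	/* alignment cap */

	return 1UL << __fls(mask & pgsize_bitmap);	/* largest supported */
}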
