 -rw-r--r--   arch/x86/include/asm/amd_iommu_types.h | 28
 -rw-r--r--   arch/x86/kernel/amd_iommu.c             | 53
 2 files changed, 61 insertions, 20 deletions
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index ba19ad4c47d0..5e8da56755dd 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -172,6 +172,34 @@
                                 (~((1ULL << (12 + ((lvl) * 9))) - 1)))
 #define PM_ALIGNED(lvl, addr)   ((PM_MAP_MASK(lvl) & (addr)) == (addr))
 
+/*
+ * Returns the page table level to use for a given page size
+ * Pagesize is expected to be a power-of-two
+ */
+#define PAGE_SIZE_LEVEL(pagesize) \
+                ((__ffs(pagesize) - 12) / 9)
+/*
+ * Returns the number of ptes to use for a given page size
+ * Pagesize is expected to be a power-of-two
+ */
+#define PAGE_SIZE_PTE_COUNT(pagesize) \
+                (1ULL << ((__ffs(pagesize) - 12) % 9))
+
+/*
+ * Aligns a given io-virtual address to a given page size
+ * Pagesize is expected to be a power-of-two
+ */
+#define PAGE_SIZE_ALIGN(address, pagesize) \
+                ((address) & ~((pagesize) - 1))
+/*
+ * Creates an IOMMU PTE for an address an a given pagesize
+ * The PTE has no permission bits set
+ * Pagesize is expected to be a power-of-two larger than 4096
+ */
+#define PAGE_SIZE_PTE(address, pagesize) \
+                (((address) | ((pagesize) - 1)) & \
+                 (~(pagesize >> 1)) & PM_ADDR_MASK)
+
 #define IOMMU_PTE_P  (1ULL << 0)
 #define IOMMU_PTE_TV (1ULL << 1)
 #define IOMMU_PTE_U  (1ULL << 59)
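
The four helpers added above only do shift arithmetic on the page size, so their effect is easy to check in isolation. The following user-space sketch is hypothetical and not part of the patch: __ffs() is stood in for by GCC's __builtin_ctzll(), PM_ADDR_MASK is copied from the header, and the macros mirror the ones added above. For a 2 MiB page it reports level 1 and a single PTE; for a 32 KiB page, level 0 and 8 PTEs whose shared value has bits 12-13 set and bit 14 clear (the bits the IOMMU later uses to derive the page size).

/*
 * Hypothetical user-space sketch of the new helpers (not kernel code).
 * __ffs() is approximated by __builtin_ctzll(), PM_ADDR_MASK is copied
 * from amd_iommu_types.h, the rest mirrors the macros added above.
 */
#include <stdio.h>

#define PM_ADDR_MASK            0x000ffffffffff000ULL
#define FFS(x)                  ((unsigned)__builtin_ctzll(x))

#define PAGE_SIZE_LEVEL(ps)     ((FFS(ps) - 12) / 9)
#define PAGE_SIZE_PTE_COUNT(ps) (1ULL << ((FFS(ps) - 12) % 9))
#define PAGE_SIZE_ALIGN(a, ps)  ((a) & ~((ps) - 1))
#define PAGE_SIZE_PTE(a, ps)    (((a) | ((ps) - 1)) & (~((ps) >> 1)) & PM_ADDR_MASK)

int main(void)
{
        unsigned long long addr    = 0x12345678000ULL;
        unsigned long long sizes[] = { 0x8000ULL, 0x200000ULL, 0x40000000ULL };

        for (int i = 0; i < 3; i++) {
                unsigned long long ps = sizes[i];

                /* level and pte count used by alloc_pte()/iommu_map_page() */
                printf("size %#llx: level %u, %llu pte(s), pte value %#llx\n",
                       ps, PAGE_SIZE_LEVEL(ps), PAGE_SIZE_PTE_COUNT(ps),
                       PAGE_SIZE_PTE(PAGE_SIZE_ALIGN(addr, ps), ps));
        }
        return 0;
}
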
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 59cae7c4df54..41700314f3e0 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -730,18 +730,22 @@ static bool increase_address_space(struct protection_domain *domain,
 
 static u64 *alloc_pte(struct protection_domain *domain,
                       unsigned long address,
-                      int end_lvl,
+                      unsigned long page_size,
                       u64 **pte_page,
                       gfp_t gfp)
 {
+        int level, end_lvl;
         u64 *pte, *page;
-        int level;
+
+        BUG_ON(!is_power_of_2(page_size));
 
         while (address > PM_LEVEL_SIZE(domain->mode))
                 increase_address_space(domain, gfp);
 
         level = domain->mode - 1;
         pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
+        address = PAGE_SIZE_ALIGN(address, page_size);
+        end_lvl = PAGE_SIZE_LEVEL(page_size);
 
         while (level > end_lvl) {
                 if (!IOMMU_PTE_PRESENT(*pte)) {
@@ -751,6 +755,10 @@ static u64 *alloc_pte(struct protection_domain *domain,
                         *pte = PM_LEVEL_PDE(level, virt_to_phys(page));
                 }
 
+                /* No level skipping support yet */
+                if (PM_PTE_LEVEL(*pte) != level)
+                        return NULL;
+
                 level -= 1;
 
                 pte = IOMMU_PTE_PAGE(*pte);
@@ -806,31 +814,36 @@ static int iommu_map_page(struct protection_domain *dom,
                           unsigned long bus_addr,
                           unsigned long phys_addr,
                           int prot,
-                          int map_size)
+                          unsigned long page_size)
 {
         u64 __pte, *pte;
-
-        bus_addr  = PAGE_ALIGN(bus_addr);
-        phys_addr = PAGE_ALIGN(phys_addr);
-
-        BUG_ON(!PM_ALIGNED(map_size, bus_addr));
-        BUG_ON(!PM_ALIGNED(map_size, phys_addr));
+        int i, count;
 
         if (!(prot & IOMMU_PROT_MASK))
                 return -EINVAL;
 
-        pte = alloc_pte(dom, bus_addr, map_size, NULL, GFP_KERNEL);
+        bus_addr  = PAGE_ALIGN(bus_addr);
+        phys_addr = PAGE_ALIGN(phys_addr);
+        count     = PAGE_SIZE_PTE_COUNT(page_size);
+        pte       = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL);
+
+        for (i = 0; i < count; ++i)
+                if (IOMMU_PTE_PRESENT(pte[i]))
+                        return -EBUSY;
 
-        if (IOMMU_PTE_PRESENT(*pte))
-                return -EBUSY;
+        if (page_size > PAGE_SIZE) {
+                __pte = PAGE_SIZE_PTE(phys_addr, page_size);
+                __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_P | IOMMU_PTE_FC;
+        } else
+                __pte = phys_addr | IOMMU_PTE_P | IOMMU_PTE_FC;
 
-        __pte = phys_addr | IOMMU_PTE_P;
         if (prot & IOMMU_PROT_IR)
                 __pte |= IOMMU_PTE_IR;
         if (prot & IOMMU_PROT_IW)
                 __pte |= IOMMU_PTE_IW;
 
-        *pte = __pte;
+        for (i = 0; i < count; ++i)
+                pte[i] = __pte;
 
         update_domain(dom);
 
@@ -877,7 +890,7 @@ static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
         for (addr = e->address_start; addr < e->address_end;
              addr += PAGE_SIZE) {
                 ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot,
-                                     PM_MAP_4k);
+                                     PAGE_SIZE);
                 if (ret)
                         return ret;
                 /*
@@ -1005,7 +1018,7 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom,
                 u64 *pte, *pte_page;
 
                 for (i = 0; i < num_ptes; ++i) {
-                        pte = alloc_pte(&dma_dom->domain, address, PM_MAP_4k,
+                        pte = alloc_pte(&dma_dom->domain, address, PAGE_SIZE,
                                         &pte_page, gfp);
                         if (!pte)
                                 goto out_free;
@@ -1711,7 +1724,7 @@ static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
 
         pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
         if (!pte) {
-                pte = alloc_pte(&dom->domain, address, PM_MAP_4k, &pte_page,
+                pte = alloc_pte(&dom->domain, address, PAGE_SIZE, &pte_page,
                                 GFP_ATOMIC);
                 aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page;
         } else
@@ -2457,7 +2470,7 @@ static int amd_iommu_map_range(struct iommu_domain *dom,
         paddr &= PAGE_MASK;
 
         for (i = 0; i < npages; ++i) {
-                ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k);
+                ret = iommu_map_page(domain, iova, paddr, prot, PAGE_SIZE);
                 if (ret)
                         return ret;
 
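
Taken together, iommu_map_page() now accepts any power-of-two page size: the callers above still pass PAGE_SIZE, but for page_size > PAGE_SIZE it builds the PTE with PAGE_SIZE_PTE(), ORs in PM_LEVEL_ENC(7) so the next-level field signals a size-encoded entry, and replicates that value into PAGE_SIZE_PTE_COUNT(page_size) consecutive slots when the size falls between two table levels. The sketch below is a hypothetical user-space illustration, not code from this patch: it shows how that encoding round-trips, i.e. how the mapped page size can be read back from the lowest clear bit above the 4k offset bits.

/*
 * Hypothetical user-space sketch (not from the patch) of the level-7
 * size encoding: PAGE_SIZE_PTE() is copied from the header change,
 * pte_page_size() is an illustrative decoder.
 */
#include <assert.h>
#include <stdio.h>

#define PM_ADDR_MASK    0x000ffffffffff000ULL

#define PAGE_SIZE_PTE(address, pagesize) \
        (((address) | ((pagesize) - 1)) & (~(pagesize >> 1)) & PM_ADDR_MASK)

/* lowest zero bit of (pte | 0xfff) sits at log2(page size) - 1 */
static unsigned long long pte_page_size(unsigned long long pte)
{
        return 1ULL << (__builtin_ctzll(~(pte | 0xfffULL)) + 1);
}

int main(void)
{
        unsigned long long addr = 0x12400000ULL;  /* 2 MiB aligned IOVA */
        unsigned long long size = 0x200000ULL;    /* 2 MiB page         */
        unsigned long long pte  = PAGE_SIZE_PTE(addr, size);

        printf("pte %#llx encodes a %#llx byte page\n", pte, pte_page_size(pte));
        assert(pte_page_size(pte) == size);
        return 0;
}
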
