author	Joerg Roedel <joerg.roedel@amd.com>	2010-01-19 11:27:39 -0500
committer	Joerg Roedel <joerg.roedel@amd.com>	2010-03-07 12:01:12 -0500
commit	24cd772315c19e4d9409d0d21367ec1ebab3149f (patch)
tree	cf8a322797fb4d27c21451b1b9fbbdd234dc667e /arch
parent	cbb9d729f3433c9c2660b01dc52e6deb89488886 (diff)
x86/amd-iommu: Make iommu_unmap_page and fetch_pte aware of page sizes
This patch extends the functionality of iommu_unmap_page
and fetch_pte to support arbitrary page sizes.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/include/asm/amd_iommu_types.h	6
-rw-r--r--	arch/x86/kernel/amd_iommu.c	90
2 files changed, 78 insertions, 18 deletions
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index 5e8da56755dd..b150c74e0d48 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -200,6 +200,12 @@
 	 (((address) | ((pagesize) - 1)) &	\
 	  (~(pagesize >> 1)) & PM_ADDR_MASK)
 
+/*
+ * Takes a PTE value with mode=0x07 and returns the page size it maps
+ */
+#define PTE_PAGE_SIZE(pte) \
+	(1ULL << (1 + ffz(((pte) | 0xfffULL))))
+
 #define IOMMU_PTE_P  (1ULL << 0)
 #define IOMMU_PTE_TV (1ULL << 1)
 #define IOMMU_PTE_U  (1ULL << 59)
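
Aside (editorial, not part of the commit): PTE_PAGE_SIZE inverts the PAGE_SIZE_PTE encoding directly above it. A large PTE marks its size by the lowest zero address bit above bit 11, so ORing in 0xfff and taking the first zero bit recovers the mapped size. A minimal userspace sketch, with the kernel's ffz() modeled by a compiler builtin:

#include <stdio.h>

/* Stand-in for the kernel's ffz(): index of the first zero bit.
 * Undefined for an all-ones argument, which cannot happen here
 * because PM_ADDR_MASK keeps the top bits clear. */
static unsigned long ffz(unsigned long long x)
{
	return __builtin_ctzll(~x);
}

#define PTE_PAGE_SIZE(pte) \
	(1ULL << (1 + ffz(((pte) | 0xfffULL))))

int main(void)
{
	/*
	 * Address field of a 2 MiB mapping at 0x40000000 as produced
	 * by PAGE_SIZE_PTE: bits 12..19 set, bit 20 clear.
	 */
	unsigned long long pte = 0x400ff000ULL;

	/* first zero bit above bit 11 is bit 20 -> 1 << 21 = 0x200000 */
	printf("page size: %#llx\n", PTE_PAGE_SIZE(pte));
	return 0;
}
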
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 41700314f3e0..503d312f9d6f 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -776,28 +776,47 @@ static u64 *alloc_pte(struct protection_domain *domain,
  * This function checks if there is a PTE for a given dma address. If
  * there is one, it returns the pointer to it.
  */
-static u64 *fetch_pte(struct protection_domain *domain,
-		      unsigned long address, int map_size)
+static u64 *fetch_pte(struct protection_domain *domain, unsigned long address)
 {
 	int level;
 	u64 *pte;
 
-	level = domain->mode - 1;
-	pte   = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
+	if (address > PM_LEVEL_SIZE(domain->mode))
+		return NULL;
+
+	level = domain->mode - 1;
+	pte   = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
 
-	while (level > map_size) {
+	while (level > 0) {
+
+		/* Not Present */
 		if (!IOMMU_PTE_PRESENT(*pte))
 			return NULL;
 
+		/* Large PTE */
+		if (PM_PTE_LEVEL(*pte) == 0x07) {
+			unsigned long pte_mask, __pte;
+
+			/*
+			 * If we have a series of large PTEs, make
+			 * sure to return a pointer to the first one.
+			 */
+			pte_mask = PTE_PAGE_SIZE(*pte);
+			pte_mask = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1);
+			__pte    = ((unsigned long)pte) & pte_mask;
+
+			return (u64 *)__pte;
+		}
+
+		/* No level skipping support yet */
+		if (PM_PTE_LEVEL(*pte) != level)
+			return NULL;
+
 		level -= 1;
 
+		/* Walk to the next level */
 		pte = IOMMU_PTE_PAGE(*pte);
 		pte = &pte[PM_LEVEL_INDEX(level, address)];
-
-		if ((PM_PTE_LEVEL(*pte) == 0) && level != map_size) {
-			pte = NULL;
-			break;
-		}
 	}
 
 	return pte;
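
Aside (editorial, not part of the commit): a large mapping whose size falls between two page-table levels is realized as a series of identical mode-7 PTEs, and fetch_pte must return a pointer to the first entry of that series. The sketch below walks through the alignment arithmetic, assuming the PAGE_SIZE_PTE_COUNT definition introduced by the parent commit cbb9d729f343 and substituting a compiler builtin for the kernel's __ffs():

#include <stdio.h>

/* Number of identical PTEs used for a given page size (assumed from
 * the parent commit); __builtin_ctzll stands in for __ffs(). */
#define PAGE_SIZE_PTE_COUNT(pgsize) \
	(1ULL << ((__builtin_ctzll(pgsize) - 12) % 9))

int main(void)
{
	/* A 32 KiB mapping sits between the 4 KiB and 2 MiB levels
	 * and is realized as 8 identical level-0 PTEs. */
	unsigned long long page_size = 0x8000;	 /* PTE_PAGE_SIZE(*pte) */
	unsigned long pte_addr = 0x1000 + 5 * 8; /* byte address of the 6th PTE */
	unsigned long pte_mask, first_pte;

	/* 8 entries of 8 bytes each: align down to a 64-byte boundary */
	pte_mask  = ~((PAGE_SIZE_PTE_COUNT(page_size) << 3) - 1);
	first_pte = pte_addr & pte_mask;

	/* prints: count=8 first_pte=0x1000 */
	printf("count=%llu first_pte=%#lx\n",
	       PAGE_SIZE_PTE_COUNT(page_size), first_pte);
	return 0;
}
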
@@ -850,13 +869,48 @@ static int iommu_map_page(struct protection_domain *dom,
 	return 0;
 }
 
-static void iommu_unmap_page(struct protection_domain *dom,
-			     unsigned long bus_addr, int map_size)
+static unsigned long iommu_unmap_page(struct protection_domain *dom,
+				      unsigned long bus_addr,
+				      unsigned long page_size)
 {
-	u64 *pte = fetch_pte(dom, bus_addr, map_size);
+	unsigned long long unmap_size, unmapped;
+	u64 *pte;
+
+	BUG_ON(!is_power_of_2(page_size));
+
+	unmapped = 0;
+
+	while (unmapped < page_size) {
+
+		pte = fetch_pte(dom, bus_addr);
+
+		if (!pte) {
+			/*
+			 * No PTE for this address
+			 * move forward in 4kb steps
+			 */
+			unmap_size = PAGE_SIZE;
+		} else if (PM_PTE_LEVEL(*pte) == 0) {
+			/* 4kb PTE found for this address */
+			unmap_size = PAGE_SIZE;
+			*pte       = 0ULL;
+		} else {
+			int count, i;
+
+			/* Large PTE found which maps this address */
+			unmap_size = PTE_PAGE_SIZE(*pte);
+			count      = PAGE_SIZE_PTE_COUNT(unmap_size);
+			for (i = 0; i < count; i++)
+				pte[i] = 0ULL;
+		}
+
+		bus_addr  = (bus_addr & ~(unmap_size - 1)) + unmap_size;
+		unmapped += unmap_size;
+	}
+
+	BUG_ON(!is_power_of_2(unmapped));
 
-	if (pte)
-		*pte = 0;
+	return unmapped;
 }
 
 /*
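
Aside (editorial, not part of the commit): iommu_unmap_page now reports how much address space it actually tore down. Since a large PTE may cover far more than the requested page_size, the loop rounds bus_addr down to the mapping boundary before stepping past it, and the returned total can exceed the request. A small userspace sketch of that address arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned long bus_addr   = 0x40003000;	/* lands inside a 2 MiB PTE */
	unsigned long unmap_size = 0x200000;	/* PTE_PAGE_SIZE(*pte) */
	unsigned long unmapped   = 0;

	/* round down to the mapping boundary, then step past it */
	bus_addr  = (bus_addr & ~(unmap_size - 1)) + unmap_size;
	unmapped += unmap_size;

	/* prints: next=0x40200000 unmapped=0x200000
	 * a 4 KiB request that hit this PTE reports 2 MiB unmapped */
	printf("next=%#lx unmapped=%#lx\n", bus_addr, unmapped);
	return 0;
}
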
@@ -1054,7 +1108,7 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom,
 	for (i = dma_dom->aperture[index]->offset;
 	     i < dma_dom->aperture_size;
 	     i += PAGE_SIZE) {
-		u64 *pte = fetch_pte(&dma_dom->domain, i, PM_MAP_4k);
+		u64 *pte = fetch_pte(&dma_dom->domain, i);
 		if (!pte || !IOMMU_PTE_PRESENT(*pte))
 			continue;
 
@@ -2491,7 +2545,7 @@ static void amd_iommu_unmap_range(struct iommu_domain *dom,
 	iova  &= PAGE_MASK;
 
 	for (i = 0; i < npages; ++i) {
-		iommu_unmap_page(domain, iova, PM_MAP_4k);
+		iommu_unmap_page(domain, iova, PAGE_SIZE);
 		iova  += PAGE_SIZE;
 	}
 
@@ -2506,7 +2560,7 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
 	phys_addr_t paddr;
 	u64 *pte;
 
-	pte = fetch_pte(domain, iova, PM_MAP_4k);
+	pte = fetch_pte(domain, iova);
 
 	if (!pte || !IOMMU_PTE_PRESENT(*pte))
 		return 0;