author		Joerg Roedel <joerg.roedel@amd.com>	2009-09-03 06:21:31 -0400
committer	Joerg Roedel <joerg.roedel@amd.com>	2009-09-03 10:11:08 -0400
commit		a6b256b41357c33ccb2d105a4457e22bdc83e021 (patch)
tree		c8567952372218e4cc1a864835ceb3162068fb80 /arch
parent		674d798a80cb6ea1defa01899099f40d9124423c (diff)
x86/amd-iommu: Support higher level PTEs in iommu_unmap_page
This patch changes fetch_pte() and iommu_unmap_page() to support page sizes other than 4k as well.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
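For context, here is a minimal sketch of the lookup this patch introduces, assembled from the hunks below (simplified, not the verbatim kernel code): fetch_pte() now walks the page table only down to the level requested by the caller (map_size, e.g. PM_MAP_4k) instead of always descending to level 0, and bails out early when it hits a leaf PTE above that level.

	static u64 *fetch_pte(struct protection_domain *domain,
			      unsigned long address, int map_size)
	{
		int level = domain->mode - 1;
		u64 *pte  = &domain->pt_root[PM_LEVEL_INDEX(level, address)];

		/* Descend only down to the level the caller asked for. */
		while (level > map_size) {
			if (!IOMMU_PTE_PRESENT(*pte))
				return NULL;	/* nothing mapped here */

			level -= 1;
			pte = IOMMU_PTE_PAGE(*pte);
			pte = &pte[PM_LEVEL_INDEX(level, address)];

			/*
			 * A next-level field of 0 marks a leaf PTE; hitting
			 * one above the requested level means the range is
			 * mapped with a larger page size, so there is no
			 * PTE at the requested level.
			 */
			if ((PM_PTE_LEVEL(*pte) == 0) && level != map_size)
				return NULL;
		}

		return pte;
	}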
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/include/asm/amd_iommu_types.h	1
-rw-r--r--	arch/x86/kernel/amd_iommu.c	21
2 files changed, 14 insertions(+), 8 deletions(-)
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index d66430de5f7c..351ca39ece05 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -158,6 +158,7 @@
 #define PM_LEVEL_ENC(x)		(((x) << 9) & 0xe00ULL)
 #define PM_LEVEL_PDE(x, a)	((a) | PM_LEVEL_ENC((x)) | \
 				 IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW)
+#define PM_PTE_LEVEL(pte)	(((pte) >> 9) & 0x7ULL)
 
 #define IOMMU_PTE_P  (1ULL << 0)
 #define IOMMU_PTE_TV (1ULL << 1)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index addf6588c366..002cf9cab9e9 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -62,7 +62,7 @@ static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
 			       unsigned long start_page,
 			       unsigned int pages);
 static u64 *fetch_pte(struct protection_domain *domain,
-		      unsigned long address);
+		      unsigned long address, int map_size);
 static void update_domain(struct protection_domain *domain);
 
 #ifndef BUS_NOTIFY_UNBOUND_DRIVER
@@ -552,9 +552,9 @@ static int iommu_map_page(struct protection_domain *dom,
 }
 
 static void iommu_unmap_page(struct protection_domain *dom,
-			     unsigned long bus_addr)
+			     unsigned long bus_addr, int map_size)
 {
-	u64 *pte = fetch_pte(dom, bus_addr);
+	u64 *pte = fetch_pte(dom, bus_addr, map_size);
 
 	if (pte)
 		*pte = 0;
@@ -668,7 +668,7 @@ static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
  * there is one, it returns the pointer to it.
  */
 static u64 *fetch_pte(struct protection_domain *domain,
-		      unsigned long address)
+		      unsigned long address, int map_size)
 {
 	int level;
 	u64 *pte;
@@ -676,7 +676,7 @@ static u64 *fetch_pte(struct protection_domain *domain,
 	level	 = domain->mode - 1;
 	pte	 = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
 
-	while (level > 0) {
+	while (level > map_size) {
 		if (!IOMMU_PTE_PRESENT(*pte))
 			return NULL;
 
@@ -684,6 +684,11 @@ static u64 *fetch_pte(struct protection_domain *domain,
 
 		pte = IOMMU_PTE_PAGE(*pte);
 		pte = &pte[PM_LEVEL_INDEX(level, address)];
+
+		if ((PM_PTE_LEVEL(*pte) == 0) && level != map_size) {
+			pte = NULL;
+			break;
+		}
 	}
 
 	return pte;
@@ -757,7 +762,7 @@ static int alloc_new_range(struct amd_iommu *iommu,
 	for (i = dma_dom->aperture[index]->offset;
 	     i < dma_dom->aperture_size;
 	     i += PAGE_SIZE) {
-		u64 *pte = fetch_pte(&dma_dom->domain, i);
+		u64 *pte = fetch_pte(&dma_dom->domain, i, PM_MAP_4k);
 		if (!pte || !IOMMU_PTE_PRESENT(*pte))
 			continue;
 
@@ -2192,7 +2197,7 @@ static void amd_iommu_unmap_range(struct iommu_domain *dom,
 	iova  &= PAGE_MASK;
 
 	for (i = 0; i < npages; ++i) {
-		iommu_unmap_page(domain, iova);
+		iommu_unmap_page(domain, iova, PM_MAP_4k);
 		iova  += PAGE_SIZE;
 	}
 
@@ -2207,7 +2212,7 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
 	phys_addr_t paddr;
 	u64 *pte;
 
-	pte = fetch_pte(domain, iova);
+	pte = fetch_pte(domain, iova, PM_MAP_4k);
 
 	if (!pte || !IOMMU_PTE_PRESENT(*pte))
 		return 0;
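For reference, the new PM_PTE_LEVEL() macro is the read side of the existing PM_LEVEL_ENC(): both address the 3-bit next-level field in bits 9-11 of a PTE. A hypothetical round trip (page_addr standing in for a page-aligned physical address) would look like:

	u64 pde = PM_LEVEL_PDE(2, page_addr);	/* encode next-level = 2 */
	int lvl = PM_PTE_LEVEL(pde);		/* decodes back to 2     */

A leaf PTE carries a next-level field of 0, which is exactly what the new check in fetch_pte() uses to detect that a range is mapped with a larger page size than the caller requested.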