author    Joerg Roedel <joerg.roedel@amd.com>  2009-09-02 10:48:40 -0400
committer Joerg Roedel <joerg.roedel@amd.com>  2009-09-03 10:03:46 -0400
commit    8bc3e127421bf3b735edbde05135892c12c5f615 (patch)
tree      27acafae8951633fd75ddc6713e77633662b90f5 /arch/x86/kernel/amd_iommu.c
parent    50020fb6324465e478d6c8cdbf3c695f0a60358d (diff)
x86/amd-iommu: Change alloc_pte to support 64 bit address space
This patch changes the alloc_pte function so that it can map pages into the whole 64 bit address space supported by AMD IOMMU hardware, lifting the old limit of 2**39 bytes.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
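The new code indexes the page table through the PM_LEVEL_* helpers introduced by the parent commit in amd_iommu_types.h. For reference, a sketch of roughly how those helpers are defined in that era of the tree; treat the exact forms as approximate and consult the tree for the authoritative definitions:

/* Sketch of the PM_LEVEL_* helpers (reconstructed, approximate).
 * Each page-table level translates 9 address bits; level 0 entries
 * map 4k pages, so level x covers address bits starting at 12 + 9*x. */
#define PM_LEVEL_SHIFT(x)       (12 + ((x) * 9))
#define PM_LEVEL_SIZE(x)        (((x) < 6) ? \
                                 ((1ULL << PM_LEVEL_SHIFT((x))) - 1) : \
                                 (0xffffffffffffffffULL))
#define PM_LEVEL_INDEX(x, a)    (((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL)
#define PM_LEVEL_ENC(x)         (((x) << 9) & 0xe00ULL)
#define PM_LEVEL_PDE(x, a)      ((a) | PM_LEVEL_ENC((x)) | \
                                 IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW)

Note that PM_LEVEL_SIZE saturates to all-ones at six levels, which is what lets the size check in alloc_pte cover the full 64 bit range rather than the fixed three-level (2**39 byte) layout.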
Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r--  arch/x86/kernel/amd_iommu.c | 44
1 file changed, 20 insertions(+), 24 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index fc97b51f0287..3be2b61fc31f 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -55,7 +55,7 @@ struct iommu_cmd {
 static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
                              struct unity_map_entry *e);
 static struct dma_ops_domain *find_protection_domain(u16 devid);
-static u64* alloc_pte(struct protection_domain *dom,
+static u64 *alloc_pte(struct protection_domain *domain,
                       unsigned long address, u64
                       **pte_page, gfp_t gfp);
 static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
@@ -1351,39 +1351,35 @@ static bool increase_address_space(struct protection_domain *domain,
        return true;
 }
 
-/*
- * If the pte_page is not yet allocated this function is called
- */
-static u64* alloc_pte(struct protection_domain *dom,
+static u64 *alloc_pte(struct protection_domain *domain,
                       unsigned long address, u64 **pte_page, gfp_t gfp)
 {
        u64 *pte, *page;
+       int level;
 
-       pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(address)];
+       while (address > PM_LEVEL_SIZE(domain->mode))
+               increase_address_space(domain, gfp);
 
-       if (!IOMMU_PTE_PRESENT(*pte)) {
-               page = (u64 *)get_zeroed_page(gfp);
-               if (!page)
-                       return NULL;
-               *pte = IOMMU_L2_PDE(virt_to_phys(page));
-       }
+       level = domain->mode - 1;
+       pte   = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
 
-       pte = IOMMU_PTE_PAGE(*pte);
-       pte = &pte[IOMMU_PTE_L1_INDEX(address)];
+       while (level > 0) {
+               if (!IOMMU_PTE_PRESENT(*pte)) {
+                       page = (u64 *)get_zeroed_page(gfp);
+                       if (!page)
+                               return NULL;
+                       *pte = PM_LEVEL_PDE(level, virt_to_phys(page));
+               }
 
-       if (!IOMMU_PTE_PRESENT(*pte)) {
-               page = (u64 *)get_zeroed_page(gfp);
-               if (!page)
-                       return NULL;
-               *pte = IOMMU_L1_PDE(virt_to_phys(page));
-       }
+               level -= 1;
 
-       pte = IOMMU_PTE_PAGE(*pte);
+               pte = IOMMU_PTE_PAGE(*pte);
 
-       if (pte_page)
-               *pte_page = pte;
+               if (pte_page && level == 0)
+                       *pte_page = pte;
 
-       pte = &pte[IOMMU_PTE_L0_INDEX(address)];
+               pte = &pte[PM_LEVEL_INDEX(level, address)];
+       }
 
        return pte;
 }
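To see what the rewritten walk does, here is a small hypothetical userspace sketch (not part of the patch) that decomposes an address into per-level table indices the way the new loop does, assuming the PM_LEVEL_* definitions sketched above:

#include <stdio.h>
#include <stdint.h>

/* Assumed to match the kernel helpers sketched above (illustrative only). */
#define PM_LEVEL_SHIFT(x)    (12 + ((x) * 9))
#define PM_LEVEL_INDEX(x, a) (((a) >> PM_LEVEL_SHIFT(x)) & 0x1ffULL)

int main(void)
{
        uint64_t address = 0x0000123456789000ULL; /* example IOVA */
        int mode = 4;   /* four page-table levels -> 48-bit address space */

        /* Walk top-down, like the while (level > 0) loop in alloc_pte. */
        for (int level = mode - 1; level >= 0; level--)
                printf("level %d shifts by %d -> index %llu\n",
                       level, PM_LEVEL_SHIFT(level),
                       (unsigned long long)PM_LEVEL_INDEX(level, address));

        return 0;
}

Each iteration of the kernel loop consumes nine more address bits; once level reaches 0 the returned pointer indexes the 4k leaf page table, and a larger domain->mode simply adds iterations instead of the fixed three-level walk the old code hard-wired.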