author     Joerg Roedel <joerg.roedel@amd.com>   2009-09-03 05:33:51 -0400
committer  Joerg Roedel <joerg.roedel@amd.com>   2009-09-03 10:11:17 -0400
commit     abdc5eb3d69279039ba6cb89719913d08013ab14 (patch)
tree       fc77040a85d9601d91c6dee0e297386a3f5f7ccc /arch/x86/kernel/amd_iommu.c
parent     a6b256b41357c33ccb2d105a4457e22bdc83e021 (diff)
x86/amd-iommu: Change iommu_map_page to support multiple page sizes
This patch adds a map_size parameter to the iommu_map_page
function, which makes it generic enough to handle multiple
page sizes. This in turn requires a change to alloc_pte,
which is also made in this patch.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r--  arch/x86/kernel/amd_iommu.c | 31
1 file changed, 20 insertions(+), 11 deletions(-)
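At a glance, the interface change is the extra page-size argument on both helpers. A minimal before/after sketch of the prototypes, taken from the hunks below (PM_MAP_4k is the 4k page-mode constant, presumably defined alongside this series in amd_iommu_types.h; every caller touched here passes it, so behaviour is unchanged for them):

/* Before: iommu_map_page could only create 4k mappings. */
static int iommu_map_page(struct protection_domain *dom,
                          unsigned long bus_addr,
                          unsigned long phys_addr,
                          int prot);

/* After: callers state the page size; alloc_pte gains a matching
 * end_lvl parameter that tells the page-table walk where to stop. */
static int iommu_map_page(struct protection_domain *dom,
                          unsigned long bus_addr,
                          unsigned long phys_addr,
                          int prot,
                          int map_size);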
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 002cf9cab9e9..45be9499c973 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -56,8 +56,8 @@ static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
                              struct unity_map_entry *e);
 static struct dma_ops_domain *find_protection_domain(u16 devid);
 static u64 *alloc_pte(struct protection_domain *domain,
-                      unsigned long address, u64
-                      **pte_page, gfp_t gfp);
+                      unsigned long address, int end_lvl,
+                      u64 **pte_page, gfp_t gfp);
 static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
                                       unsigned long start_page,
                                       unsigned int pages);
@@ -523,17 +523,21 @@ void amd_iommu_flush_all_devices(void)
 static int iommu_map_page(struct protection_domain *dom,
                           unsigned long bus_addr,
                           unsigned long phys_addr,
-                          int prot)
+                          int prot,
+                          int map_size)
 {
         u64 __pte, *pte;
 
         bus_addr  = PAGE_ALIGN(bus_addr);
         phys_addr = PAGE_ALIGN(phys_addr);
 
+        BUG_ON(!PM_ALIGNED(map_size, bus_addr));
+        BUG_ON(!PM_ALIGNED(map_size, phys_addr));
+
         if (!(prot & IOMMU_PROT_MASK))
                 return -EINVAL;
 
-        pte = alloc_pte(dom, bus_addr, NULL, GFP_KERNEL);
+        pte = alloc_pte(dom, bus_addr, map_size, NULL, GFP_KERNEL);
 
         if (IOMMU_PTE_PRESENT(*pte))
                 return -EBUSY;
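The two new BUG_ON checks assert that both addresses are aligned to the requested page size. The PM_ALIGNED/PM_MAP_4k definitions are not part of this diff; the standalone, compilable sketch below models their presumed semantics (level 0 covers 4k pages, each level above adds 9 address bits). The macro bodies here are an assumption, not the header's actual text:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed semantics of the PM_* helpers from amd_iommu_types.h. */
#define PM_LEVEL_SHIFT(lvl)   (12 + ((lvl) * 9))
#define PM_ADDR_MASK          0x000ffffffffff000ULL
#define PM_MAP_MASK(lvl)      (PM_ADDR_MASK & ~((1ULL << PM_LEVEL_SHIFT(lvl)) - 1))
#define PM_ALIGNED(lvl, addr) ((PM_MAP_MASK(lvl) & (uint64_t)(addr)) == (uint64_t)(addr))
#define PM_MAP_4k             0

int main(void)
{
        /* A 4k-aligned address passes at level 0 ... */
        assert(PM_ALIGNED(PM_MAP_4k, 0x1000));
        /* ... but is rejected for a hypothetical 2M (level 1) mapping, */
        assert(!PM_ALIGNED(1, 0x1000));
        /* while a 2M-aligned address passes there too. */
        assert(PM_ALIGNED(1, 0x200000));
        printf("alignment checks behave as expected\n");
        return 0;
}

For the PM_MAP_4k case the checks are trivially true after the PAGE_ALIGN calls just above them; they only start catching real bugs once callers ask for larger page sizes.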
@@ -612,7 +616,8 @@ static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
 
         for (addr = e->address_start; addr < e->address_end;
              addr += PAGE_SIZE) {
-                ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot);
+                ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot,
+                                     PM_MAP_4k);
                 if (ret)
                         return ret;
                 /*
@@ -729,7 +734,7 @@ static int alloc_new_range(struct amd_iommu *iommu,
         u64 *pte, *pte_page;
 
         for (i = 0; i < num_ptes; ++i) {
-                pte = alloc_pte(&dma_dom->domain, address,
+                pte = alloc_pte(&dma_dom->domain, address, PM_MAP_4k,
                                 &pte_page, gfp);
                 if (!pte)
                         goto out_free;
@@ -1356,7 +1361,10 @@ static bool increase_address_space(struct protection_domain *domain,
 }
 
 static u64 *alloc_pte(struct protection_domain *domain,
-                      unsigned long address, u64 **pte_page, gfp_t gfp)
+                      unsigned long address,
+                      int end_lvl,
+                      u64 **pte_page,
+                      gfp_t gfp)
 {
         u64 *pte, *page;
         int level;
@@ -1367,7 +1375,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
         level = domain->mode - 1;
         pte   = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
 
-        while (level > 0) {
+        while (level > end_lvl) {
                 if (!IOMMU_PTE_PRESENT(*pte)) {
                         page = (u64 *)get_zeroed_page(gfp);
                         if (!page)
@@ -1379,7 +1387,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
 
                 pte = IOMMU_PTE_PAGE(*pte);
 
-                if (pte_page && level == 0)
+                if (pte_page && level == end_lvl)
                         *pte_page = pte;
 
                 pte = &pte[PM_LEVEL_INDEX(level, address)];
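The alloc_pte change above is the heart of the patch: the walk previously always descended to level 0 (a 4k leaf) and now stops at end_lvl, so a caller asking for a larger page gets the PTE one or more levels above the leaf. A toy userspace model of the walk, under the same 9-bits-per-level assumption (PM_LEVEL_INDEX here mirrors the macro used in the patch; the 4-level starting mode is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Toy model of the alloc_pte() walk: 12-bit page offset, 9 index bits
 * per page-table level. */
#define PM_LEVEL_INDEX(lvl, addr) (((addr) >> (12 + 9 * (lvl))) & 0x1ffULL)

int main(void)
{
        uint64_t address = 0x12345678000ULL;
        int end_lvl = 0;   /* PM_MAP_4k: walk all the way down to the leaf */
        int level = 4 - 1; /* domain->mode - 1 for an assumed 4-level table */

        /* The loop now compares against end_lvl instead of a hard-coded 0. */
        while (level > end_lvl) {
                printf("level %d -> index %llu\n", level,
                       (unsigned long long)PM_LEVEL_INDEX(level, address));
                level -= 1;
        }
        printf("stopped at level %d (PTE for the requested page size)\n",
               level);
        return 0;
}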
@@ -1403,7 +1411,8 @@ static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
 
         pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
         if (!pte) {
-                pte = alloc_pte(&dom->domain, address, &pte_page, GFP_ATOMIC);
+                pte = alloc_pte(&dom->domain, address, PM_MAP_4k, &pte_page,
+                                GFP_ATOMIC);
                 aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page;
         } else
                 pte += PM_LEVEL_INDEX(0, address);
@@ -2176,7 +2185,7 @@ static int amd_iommu_map_range(struct iommu_domain *dom,
         paddr &= PAGE_MASK;
 
         for (i = 0; i < npages; ++i) {
-                ret = iommu_map_page(domain, iova, paddr, prot);
+                ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k);
                 if (ret)
                         return ret;
 