author     Joerg Roedel <joerg.roedel@amd.com>   2010-01-21 05:50:28 -0500
committer  Joerg Roedel <joerg.roedel@amd.com>   2010-03-07 12:01:13 -0500
commit     12c7389abe5786349d3ea6da1961cf78d0c1c7cd (patch)
tree       02ba72cf32986de8327a6146d5fd0b2448935958 /arch/x86/kernel/amd_iommu.c
parent     468e2366cdb80cf8a691b8bc212260cfbdbd518e (diff)
iommu-api: Remove iommu_{un}map_range functions
These functions are no longer used and can be removed safely. Their functionality is now provided by the iommu_{un}map functions, which are also capable of handling multiple page sizes.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
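For reference, a caller that previously relied on iommu_map_range()/iommu_unmap_range() can get the same effect by looping over the remaining generic entry points. The sketch below is illustrative only: it assumes the iommu_map(domain, iova, paddr, gfp_order, prot) and iommu_unmap(domain, iova, gfp_order) signatures of this kernel generation, and map_region() is a hypothetical helper, not part of the tree.

#include <linux/iommu.h>		/* iommu_map(), iommu_unmap() */
#include <linux/iommu-helper.h>		/* iommu_num_pages() */

/*
 * Illustrative sketch only -- map_region() is a hypothetical helper.
 * It maps a physically contiguous region one PAGE_SIZE page at a time
 * through the generic IOMMU API; gfp_order 0 means one 4K page per call.
 */
static int map_region(struct iommu_domain *domain, unsigned long iova,
		      phys_addr_t paddr, size_t size, int prot)
{
	unsigned long i, npages = iommu_num_pages(paddr, size, PAGE_SIZE);
	int ret;

	iova  &= PAGE_MASK;
	paddr &= PAGE_MASK;

	for (i = 0; i < npages; ++i) {
		ret = iommu_map(domain, iova, paddr, 0, prot);
		if (ret) {
			/* Undo the mappings established so far. */
			while (i--) {
				iova -= PAGE_SIZE;
				iommu_unmap(domain, iova, 0);
			}
			return ret;
		}
		iova  += PAGE_SIZE;
		paddr += PAGE_SIZE;
	}

	return 0;
}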
Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r--  arch/x86/kernel/amd_iommu.c | 48
1 file changed, 0 insertions(+), 48 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 0e068c9ca5f5..d8da9988edd9 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -2506,52 +2506,6 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
 	return ret;
 }
 
-static int amd_iommu_map_range(struct iommu_domain *dom,
-			       unsigned long iova, phys_addr_t paddr,
-			       size_t size, int iommu_prot)
-{
-	struct protection_domain *domain = dom->priv;
-	unsigned long i, npages = iommu_num_pages(paddr, size, PAGE_SIZE);
-	int prot = 0;
-	int ret;
-
-	if (iommu_prot & IOMMU_READ)
-		prot |= IOMMU_PROT_IR;
-	if (iommu_prot & IOMMU_WRITE)
-		prot |= IOMMU_PROT_IW;
-
-	iova  &= PAGE_MASK;
-	paddr &= PAGE_MASK;
-
-	for (i = 0; i < npages; ++i) {
-		ret = iommu_map_page(domain, iova, paddr, prot, PAGE_SIZE);
-		if (ret)
-			return ret;
-
-		iova  += PAGE_SIZE;
-		paddr += PAGE_SIZE;
-	}
-
-	return 0;
-}
-
-static void amd_iommu_unmap_range(struct iommu_domain *dom,
-				  unsigned long iova, size_t size)
-{
-
-	struct protection_domain *domain = dom->priv;
-	unsigned long i, npages = iommu_num_pages(iova, size, PAGE_SIZE);
-
-	iova  &= PAGE_MASK;
-
-	for (i = 0; i < npages; ++i) {
-		iommu_unmap_page(domain, iova, PAGE_SIZE);
-		iova  += PAGE_SIZE;
-	}
-
-	iommu_flush_tlb_pde(domain);
-}
-
 static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
 			 phys_addr_t paddr, int gfp_order, int iommu_prot)
 {
@@ -2616,8 +2570,6 @@ static struct iommu_ops amd_iommu_ops = {
 	.detach_dev = amd_iommu_detach_device,
 	.map = amd_iommu_map,
 	.unmap = amd_iommu_unmap,
-	.map_range = amd_iommu_map_range,
-	.unmap_range = amd_iommu_unmap_range,
 	.iova_to_phys = amd_iommu_iova_to_phys,
 	.domain_has_cap = amd_iommu_domain_has_cap,
 };
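Because the remaining .map/.unmap callbacks take a gfp_order, a sufficiently aligned region can also be mapped with a single call when the IOMMU page table supports larger page sizes, which is what made the per-page range helpers redundant. A minimal sketch under the same gfp_order-based API assumption; map_2m_block() is hypothetical:

#include <linux/iommu.h>

/*
 * Illustrative sketch only -- map_2m_block() is hypothetical.  With the
 * gfp_order-based API a naturally aligned 2 MiB block can be mapped in
 * one call (order 9 == 512 * 4K pages), provided the IOMMU driver
 * supports that page size; otherwise the request may be rejected or
 * split internally.
 */
static int map_2m_block(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, int prot)
{
	return iommu_map(domain, iova, paddr, 9, prot);
}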