diff options
author | Joerg Roedel <joerg.roedel@amd.com> | 2010-01-21 10:37:36 -0500 |
---|---|---|
committer | Joerg Roedel <joerg.roedel@amd.com> | 2010-03-07 12:01:12 -0500 |
commit | 468e2366cdb80cf8a691b8bc212260cfbdbd518e (patch) | |
tree | a3e750e71c90613950bf5653387c392f58bddc63 /arch/x86/kernel/amd_iommu.c | |
parent | f03152bb7d0a74f409ad63ed36916444a7493d72 (diff) |
x86/amd-iommu: Implement ->{un}map callbacks for iommu-api
This patch implements the new callbacks for the IOMMU-API
with functions that can handle different page sizes in the
IOMMU page table.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r-- | arch/x86/kernel/amd_iommu.c | 29 |
1 file changed, 29 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 52e44af15705..0e068c9ca5f5 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -2552,6 +2552,33 @@ static void amd_iommu_unmap_range(struct iommu_domain *dom, | |||
2552 | iommu_flush_tlb_pde(domain); | 2552 | iommu_flush_tlb_pde(domain); |
2553 | } | 2553 | } |
2554 | 2554 | ||
2555 | static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova, | ||
2556 | phys_addr_t paddr, int gfp_order, int iommu_prot) | ||
2557 | { | ||
2558 | unsigned long page_size = 0x1000UL << gfp_order; | ||
2559 | struct protection_domain *domain = dom->priv; | ||
2560 | int prot = 0; | ||
2561 | |||
2562 | if (iommu_prot & IOMMU_READ) | ||
2563 | prot |= IOMMU_PROT_IR; | ||
2564 | if (iommu_prot & IOMMU_WRITE) | ||
2565 | prot |= IOMMU_PROT_IW; | ||
2566 | |||
2567 | return iommu_map_page(domain, iova, paddr, prot, page_size); | ||
2568 | } | ||
2569 | |||
2570 | static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova, | ||
2571 | int gfp_order) | ||
2572 | { | ||
2573 | struct protection_domain *domain = dom->priv; | ||
2574 | unsigned long page_size, unmap_size; | ||
2575 | |||
2576 | page_size = 0x1000UL << gfp_order; | ||
2577 | unmap_size = iommu_unmap_page(domain, iova, page_size); | ||
2578 | |||
2579 | return get_order(unmap_size); | ||
2580 | } | ||
2581 | |||
2555 | static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom, | 2582 | static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom, |
2556 | unsigned long iova) | 2583 | unsigned long iova) |
2557 | { | 2584 | { |
@@ -2587,6 +2614,8 @@ static struct iommu_ops amd_iommu_ops = { | |||
2587 | .domain_destroy = amd_iommu_domain_destroy, | 2614 | .domain_destroy = amd_iommu_domain_destroy, |
2588 | .attach_dev = amd_iommu_attach_device, | 2615 | .attach_dev = amd_iommu_attach_device, |
2589 | .detach_dev = amd_iommu_detach_device, | 2616 | .detach_dev = amd_iommu_detach_device, |
2617 | .map = amd_iommu_map, | ||
2618 | .unmap = amd_iommu_unmap, | ||
2590 | .map_range = amd_iommu_map_range, | 2619 | .map_range = amd_iommu_map_range, |
2591 | .unmap_range = amd_iommu_unmap_range, | 2620 | .unmap_range = amd_iommu_unmap_range, |
2592 | .iova_to_phys = amd_iommu_iova_to_phys, | 2621 | .iova_to_phys = amd_iommu_iova_to_phys, |