author		Joerg Roedel <joerg.roedel@amd.com>	2008-12-03 09:04:09 -0500
committer	Joerg Roedel <joerg.roedel@amd.com>	2009-01-03 08:11:08 -0500
commit		dde57a210dcdce85e2813bab8f88687761d9f6a6 (patch)
tree		cac9c8c6cc41361dad1c4d1c71f5926e670185fc /drivers
parent		4c5478c94eb29e6101f1f13175f7455bc8b5d953 (diff)
VT-d: adapt domain map and unmap functions for IOMMU API
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
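The patch turns the driver-private map/unmap entry points into static callbacks that fit the generic IOMMU API: instead of taking a struct dmar_domain and being exported, the functions now take a struct iommu_domain, fetch the VT-d domain through domain->priv, and translate the generic IOMMU_READ/IOMMU_WRITE flags into the VT-d page-table bits DMA_PTE_READ/DMA_PTE_WRITE before calling domain_page_mapping(). As a rough sketch of how such callbacks are typically hooked into the generic layer (the registration itself is not part of this patch, so the ops layout and the other callback names below are an assumption based on the iommu_ops interface of that series):

/*
 * Hypothetical sketch, not part of this patch: wiring the new static
 * callbacks into the generic IOMMU API of this series.
 */
static struct iommu_ops intel_iommu_ops = {
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy	= intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map_range,	/* introduced in this patch */
	.unmap		= intel_iommu_unmap_range,	/* introduced in this patch */
	.iova_to_phys	= intel_iommu_iova_to_phys,
};

/* registered once during IOMMU initialization, e.g.: */
register_iommu(&intel_iommu_ops);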
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/pci/intel-iommu.c	33
1 files changed, 20 insertions(+), 13 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index db9a26cfeb8f..8af6c96f31b3 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -3047,20 +3047,28 @@ static void intel_iommu_detach_device(struct iommu_domain *domain,
 	vm_domain_remove_one_dev_info(dmar_domain, pdev);
 }
 
-int intel_iommu_map_address(struct dmar_domain *domain, dma_addr_t iova,
-			    u64 hpa, size_t size, int prot)
+static int intel_iommu_map_range(struct iommu_domain *domain,
+				 unsigned long iova, phys_addr_t hpa,
+				 size_t size, int iommu_prot)
 {
+	struct dmar_domain *dmar_domain = domain->priv;
 	u64 max_addr;
 	int addr_width;
+	int prot = 0;
 	int ret;
 
+	if (iommu_prot & IOMMU_READ)
+		prot |= DMA_PTE_READ;
+	if (iommu_prot & IOMMU_WRITE)
+		prot |= DMA_PTE_WRITE;
+
 	max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size);
-	if (domain->max_addr < max_addr) {
+	if (dmar_domain->max_addr < max_addr) {
 		int min_agaw;
 		u64 end;
 
 		/* check if minimum agaw is sufficient for mapped address */
-		min_agaw = vm_domain_min_agaw(domain);
+		min_agaw = vm_domain_min_agaw(dmar_domain);
 		addr_width = agaw_to_width(min_agaw);
 		end = DOMAIN_MAX_ADDR(addr_width);
 		end = end & VTD_PAGE_MASK;
@@ -3070,28 +3078,27 @@ int intel_iommu_map_address(struct dmar_domain *domain, dma_addr_t iova,
 			       __func__, min_agaw, max_addr);
 			return -EFAULT;
 		}
-		domain->max_addr = max_addr;
+		dmar_domain->max_addr = max_addr;
 	}
 
-	ret = domain_page_mapping(domain, iova, hpa, size, prot);
+	ret = domain_page_mapping(dmar_domain, iova, hpa, size, prot);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(intel_iommu_map_address);
 
-void intel_iommu_unmap_address(struct dmar_domain *domain,
-			       dma_addr_t iova, size_t size)
+static void intel_iommu_unmap_range(struct iommu_domain *domain,
+				    unsigned long iova, size_t size)
 {
+	struct dmar_domain *dmar_domain = domain->priv;
 	dma_addr_t base;
 
 	/* The address might not be aligned */
 	base = iova & VTD_PAGE_MASK;
 	size = VTD_PAGE_ALIGN(size);
-	dma_pte_clear_range(domain, base, base + size);
+	dma_pte_clear_range(dmar_domain, base, base + size);
 
-	if (domain->max_addr == base + size)
-		domain->max_addr = base;
+	if (dmar_domain->max_addr == base + size)
+		dmar_domain->max_addr = base;
 }
-EXPORT_SYMBOL_GPL(intel_iommu_unmap_address);
 
 int intel_iommu_found(void)
 {
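Since the two functions are now static, other kernel code can no longer call them directly; consumers are expected to go through the generic IOMMU layer instead. A minimal, hypothetical caller-side sketch follows (it assumes the iommu_map_range()/iommu_unmap_range() wrappers and IOMMU_READ/IOMMU_WRITE flags of the generic API from this series; the function name, device, addresses and sizes are made up for illustration):

#include <linux/iommu.h>

/* Hypothetical consumer, e.g. a device-assignment path; not part of this patch. */
static int example_map_one_page(struct device *dev)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc();		/* backed by the driver's domain_init callback */
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, dev);
	if (ret)
		goto out_free;

	/* Dispatched to intel_iommu_map_range() through the ->map callback. */
	ret = iommu_map_range(domain, 0x100000, 0x40000000, PAGE_SIZE,
			      IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto out_detach;

	/* ...use the mapping, then tear it down via intel_iommu_unmap_range(). */
	iommu_unmap_range(domain, 0x100000, PAGE_SIZE);

out_detach:
	iommu_detach_device(domain, dev);
out_free:
	iommu_domain_free(domain);
	return ret;
}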