path: root/drivers/pci/intel-iommu.c
author		Weidong Han <weidong.han@intel.com>	2008-12-08 10:09:29 -0500
committer	Joerg Roedel <joerg.roedel@amd.com>	2009-01-03 08:02:18 -0500
commit		faa3d6f5ffe7bf60ebfd0d36513fbcda0eb0ea1a (patch)
tree		4992e52cff96da38bedfb7805c18ac97c9ae9c01 /drivers/pci/intel-iommu.c
parent		ea6606b02fc3192f2edab2db669fa0b9756b4e67 (diff)
Change intel iommu APIs of virtual machine domain
These APIs are used by KVM to use VT-d.

Signed-off-by: Weidong Han <weidong.han@intel.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
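[Editorial sketch, not part of the commit] The renamed exports give a VMM-side caller a complete alloc/attach/map/unmap lifecycle. A minimal usage sketch, assuming a hypothetical caller that has already resolved pdev, hypothetical IOVA/HPA values, and the kernel's own VTD_PAGE_SIZE, VTD_PAGE_SHIFT, DMA_PTE_READ and DMA_PTE_WRITE definitions (declarations via <linux/pci.h> and <linux/intel-iommu.h>):

	/* hypothetical example, not from this commit */
	static int example_assign_device(struct pci_dev *pdev)
	{
		struct dmar_domain *domain;
		int ret;

		domain = intel_iommu_alloc_domain();
		if (!domain)
			return -ENOMEM;

		ret = intel_iommu_attach_device(domain, pdev);
		if (ret)
			goto out_free;

		/* map one page: IOVA 0x1000 -> host physical 0x2000 (hypothetical) */
		ret = intel_iommu_map_address(domain, 0x1000, 0x2000, VTD_PAGE_SIZE,
					      DMA_PTE_READ | DMA_PTE_WRITE);
		if (ret)
			goto out_detach;

		/* ... device DMA through this domain would happen here ... */

		intel_iommu_unmap_address(domain, 0x1000, VTD_PAGE_SIZE);
	out_detach:
		intel_iommu_detach_device(domain, pdev);
	out_free:
		intel_iommu_free_domain(domain);
		return ret;
	}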
Diffstat (limited to 'drivers/pci/intel-iommu.c')
-rw-r--r--	drivers/pci/intel-iommu.c	129
1 file changed, 59 insertions(+), 70 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 8a204d5bb427..f1380269cabd 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -2944,96 +2944,87 @@ static void vm_domain_exit(struct dmar_domain *domain)
 	free_domain_mem(domain);
 }
 
-void intel_iommu_domain_exit(struct dmar_domain *domain)
+struct dmar_domain *intel_iommu_alloc_domain(void)
 {
-	u64 end;
-
-	/* Domain 0 is reserved, so dont process it */
-	if (!domain)
-		return;
-
-	end = DOMAIN_MAX_ADDR(domain->gaw);
-	end = end & (~VTD_PAGE_MASK);
-
-	/* clear ptes */
-	dma_pte_clear_range(domain, 0, end);
-
-	/* free page tables */
-	dma_pte_free_pagetable(domain, 0, end);
-
-	iommu_free_domain(domain);
-	free_domain_mem(domain);
-}
-EXPORT_SYMBOL_GPL(intel_iommu_domain_exit);
-
-struct dmar_domain *intel_iommu_domain_alloc(struct pci_dev *pdev)
-{
-	struct dmar_drhd_unit *drhd;
 	struct dmar_domain *domain;
-	struct intel_iommu *iommu;
 
-	drhd = dmar_find_matched_drhd_unit(pdev);
-	if (!drhd) {
-		printk(KERN_ERR "intel_iommu_domain_alloc: drhd == NULL\n");
-		return NULL;
-	}
-
-	iommu = drhd->iommu;
-	if (!iommu) {
-		printk(KERN_ERR
-			"intel_iommu_domain_alloc: iommu == NULL\n");
-		return NULL;
-	}
-	domain = iommu_alloc_domain(iommu);
+	domain = iommu_alloc_vm_domain();
 	if (!domain) {
 		printk(KERN_ERR
 			"intel_iommu_domain_alloc: domain == NULL\n");
 		return NULL;
 	}
-	if (domain_init(domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+	if (vm_domain_init(domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
 		printk(KERN_ERR
 			"intel_iommu_domain_alloc: domain_init() failed\n");
-		intel_iommu_domain_exit(domain);
+		vm_domain_exit(domain);
 		return NULL;
 	}
+
 	return domain;
 }
-EXPORT_SYMBOL_GPL(intel_iommu_domain_alloc);
+EXPORT_SYMBOL_GPL(intel_iommu_alloc_domain);
 
-int intel_iommu_context_mapping(
-	struct dmar_domain *domain, struct pci_dev *pdev)
+void intel_iommu_free_domain(struct dmar_domain *domain)
 {
-	int rc;
-	rc = domain_context_mapping(domain, pdev);
-	return rc;
+	vm_domain_exit(domain);
 }
-EXPORT_SYMBOL_GPL(intel_iommu_context_mapping);
+EXPORT_SYMBOL_GPL(intel_iommu_free_domain);
 
-int intel_iommu_page_mapping(
-	struct dmar_domain *domain, dma_addr_t iova,
-	u64 hpa, size_t size, int prot)
+int intel_iommu_attach_device(struct dmar_domain *domain,
+			      struct pci_dev *pdev)
 {
-	int rc;
-	rc = domain_page_mapping(domain, iova, hpa, size, prot);
-	return rc;
+	int ret;
+
+	/* normally pdev is not mapped */
+	if (unlikely(domain_context_mapped(pdev))) {
+		struct dmar_domain *old_domain;
+
+		old_domain = find_domain(pdev);
+		if (old_domain) {
+			if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
+				vm_domain_remove_one_dev_info(old_domain, pdev);
+			else
+				domain_remove_dev_info(old_domain);
+		}
+	}
+
+	ret = domain_context_mapping(domain, pdev);
+	if (ret)
+		return ret;
+
+	ret = vm_domain_add_dev_info(domain, pdev);
+	return ret;
 }
-EXPORT_SYMBOL_GPL(intel_iommu_page_mapping);
+EXPORT_SYMBOL_GPL(intel_iommu_attach_device);
 
-void intel_iommu_detach_dev(struct dmar_domain *domain, u8 bus, u8 devfn)
+void intel_iommu_detach_device(struct dmar_domain *domain,
+			       struct pci_dev *pdev)
 {
-	struct intel_iommu *iommu;
+	vm_domain_remove_one_dev_info(domain, pdev);
+}
+EXPORT_SYMBOL_GPL(intel_iommu_detach_device);
 
-	iommu = device_to_iommu(bus, devfn);
-	iommu_detach_dev(iommu, bus, devfn);
+int intel_iommu_map_address(struct dmar_domain *domain, dma_addr_t iova,
+			    u64 hpa, size_t size, int prot)
+{
+	int ret;
+	ret = domain_page_mapping(domain, iova, hpa, size, prot);
+	return ret;
 }
-EXPORT_SYMBOL_GPL(intel_iommu_detach_dev);
+EXPORT_SYMBOL_GPL(intel_iommu_map_address);
 
-struct dmar_domain *
-intel_iommu_find_domain(struct pci_dev *pdev)
+void intel_iommu_unmap_address(struct dmar_domain *domain,
+			       dma_addr_t iova, size_t size)
 {
-	return find_domain(pdev);
+	dma_addr_t base;
+
+	/* The address might not be aligned */
+	base = iova & VTD_PAGE_MASK;
+	size = VTD_PAGE_ALIGN(size);
+	dma_pte_clear_range(domain, base, base + size);
 }
-EXPORT_SYMBOL_GPL(intel_iommu_find_domain);
+EXPORT_SYMBOL_GPL(intel_iommu_unmap_address);
 
 int intel_iommu_found(void)
 {
@@ -3041,17 +3032,15 @@ int intel_iommu_found(void)
 }
 EXPORT_SYMBOL_GPL(intel_iommu_found);
 
-u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova)
+u64 intel_iommu_iova_to_phys(struct dmar_domain *domain, u64 iova)
 {
 	struct dma_pte *pte;
-	u64 pfn;
+	u64 phys = 0;
 
-	pfn = 0;
 	pte = addr_to_dma_pte(domain, iova);
-
 	if (pte)
-		pfn = dma_pte_addr(pte);
+		phys = dma_pte_addr(pte);
 
-	return pfn >> VTD_PAGE_SHIFT;
+	return phys;
 }
-EXPORT_SYMBOL_GPL(intel_iommu_iova_to_pfn);
+EXPORT_SYMBOL_GPL(intel_iommu_iova_to_phys);
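[Editorial note, hedged] The last hunk also changes the return convention: intel_iommu_iova_to_pfn returned a frame number (the PTE address shifted right by VTD_PAGE_SHIFT), whereas intel_iommu_iova_to_phys returns the page-aligned physical address stored in the PTE. A caller migrating across this commit would adapt roughly as below; domain and iova are hypothetical inputs, and VTD_PAGE_SHIFT/VTD_PAGE_MASK are the kernel's own definitions:

	/* hypothetical caller-side adaptation, not from this commit */
	u64 phys = intel_iommu_iova_to_phys(domain, iova);
	unsigned long pfn = phys >> VTD_PAGE_SHIFT;	/* what the old helper returned */
	u64 full = phys | (iova & ~VTD_PAGE_MASK);	/* add the in-page offset back */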