author     Linus Torvalds <torvalds@linux-foundation.org>   2010-05-18 10:22:37 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2010-05-18 10:22:37 -0400
commit     8123d8f17d8ba9d67e556688e4f025456ca97842 (patch)
tree       1d15088a32644e464ad3536ad7bec775050065eb /drivers
parent     06ee772043c7ad125f2c2e6a08dc563706f39e8d (diff)
parent     795e74f7a69f9c08afa4fa7c86cc4f18a62bd630 (diff)
Merge branch 'core-iommu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-iommu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
x86/amd-iommu: Add amd_iommu=off command line option
iommu-api: Remove iommu_{un}map_range functions
x86/amd-iommu: Implement ->{un}map callbacks for iommu-api
x86/amd-iommu: Make amd_iommu_iova_to_phys aware of multiple page sizes
x86/amd-iommu: Make iommu_unmap_page and fetch_pte aware of page sizes
x86/amd-iommu: Make iommu_map_page and alloc_pte aware of page sizes
kvm: Change kvm_iommu_map_pages to map large pages
VT-d: Change {un}map_range functions to implement {un}map interface
iommu-api: Add ->{un}map callbacks to iommu_ops
iommu-api: Add iommu_map and iommu_unmap functions
iommu-api: Rename ->{un}map function pointers to ->{un}map_range
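
Taken together, the series replaces the byte-length based iommu_map_range()/iommu_unmap_range() calls with order-based iommu_map()/iommu_unmap(), so a single call can install or tear down one large page. A minimal caller-side sketch of the new convention (the function and variable names below are illustrative, not taken from this merge; it assumes a domain and a physically contiguous, suitably aligned buffer set up elsewhere):

#include <linux/iommu.h>

/* Sketch only: map one 2 MiB large page (order 9 with 4 KiB base pages)
 * for read/write DMA, then tear it down again.  Both iova and paddr must
 * be aligned to PAGE_SIZE << order, or iommu_map() hits its BUG_ON(). */
static int example_map_large_page(struct iommu_domain *dom,
				  unsigned long iova, phys_addr_t paddr)
{
	const int order = 9;			/* 4 KiB << 9 = 2 MiB */
	int ret;

	ret = iommu_map(dom, iova, paddr, order, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		return ret;

	/* The unmap side works in page orders as well; the return value
	 * reports the order that was unmapped (the VT-d implementation in
	 * this merge simply echoes gfp_order back). */
	iommu_unmap(dom, iova, order);

	return 0;
}

With 4 KiB base pages, order 9 corresponds to the 2 MiB mappings that the kvm_iommu_map_pages change in this series is after.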
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/base/iommu.c       | 43
-rw-r--r--  drivers/pci/intel-iommu.c  | 22
2 files changed, 41 insertions, 24 deletions
diff --git a/drivers/base/iommu.c b/drivers/base/iommu.c
index 8ad4ffea6920..6e6b6a11b3ce 100644
--- a/drivers/base/iommu.c
+++ b/drivers/base/iommu.c
@@ -80,20 +80,6 @@ void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
 }
 EXPORT_SYMBOL_GPL(iommu_detach_device);
 
-int iommu_map_range(struct iommu_domain *domain, unsigned long iova,
-		    phys_addr_t paddr, size_t size, int prot)
-{
-	return iommu_ops->map(domain, iova, paddr, size, prot);
-}
-EXPORT_SYMBOL_GPL(iommu_map_range);
-
-void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova,
-		       size_t size)
-{
-	iommu_ops->unmap(domain, iova, size);
-}
-EXPORT_SYMBOL_GPL(iommu_unmap_range);
-
 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
 			       unsigned long iova)
 {
@@ -107,3 +93,32 @@ int iommu_domain_has_cap(struct iommu_domain *domain,
 	return iommu_ops->domain_has_cap(domain, cap);
 }
 EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
+
+int iommu_map(struct iommu_domain *domain, unsigned long iova,
+	      phys_addr_t paddr, int gfp_order, int prot)
+{
+	unsigned long invalid_mask;
+	size_t size;
+
+	size = 0x1000UL << gfp_order;
+	invalid_mask = size - 1;
+
+	BUG_ON((iova | paddr) & invalid_mask);
+
+	return iommu_ops->map(domain, iova, paddr, gfp_order, prot);
+}
+EXPORT_SYMBOL_GPL(iommu_map);
+
+int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order)
+{
+	unsigned long invalid_mask;
+	size_t size;
+
+	size = 0x1000UL << gfp_order;
+	invalid_mask = size - 1;
+
+	BUG_ON(iova & invalid_mask);
+
+	return iommu_ops->unmap(domain, iova, gfp_order);
+}
+EXPORT_SYMBOL_GPL(iommu_unmap);
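
The wrappers above fix the unit convention of the new interface: the mapped length is always a power-of-two multiple of the 4 KiB base page (0x1000UL << gfp_order), and both iova and paddr must be aligned to that length or the BUG_ON() fires. A hedged helper sketch, not part of this merge, that derives the order from a byte count with the kernel's get_order() and checks the same alignment rule up front:

#include <linux/errno.h>
#include <linux/mm.h>		/* PAGE_SIZE, get_order() */

/* Illustrative only: round a byte count up to a power-of-two number of
 * pages and validate the alignment that iommu_map() enforces.  Returns
 * the order to pass to iommu_map()/iommu_unmap(), or -EINVAL. */
static int example_size_to_map_order(unsigned long iova, phys_addr_t paddr,
				     size_t size)
{
	int order = get_order(size);	/* PAGE_SIZE << order >= size */
	unsigned long invalid_mask = (PAGE_SIZE << order) - 1;

	if ((iova | paddr) & invalid_mask)
		return -EINVAL;		/* iommu_map() would BUG_ON() */

	return order;
}

Note that the wrappers in drivers/base/iommu.c hard-code the 4 KiB base page (0x1000UL) rather than using PAGE_SIZE; the sketch assumes the usual case where the two agree.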
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 417312528ddf..371dc564e2e4 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -3626,14 +3626,15 @@ static void intel_iommu_detach_device(struct iommu_domain *domain,
 	domain_remove_one_dev_info(dmar_domain, pdev);
 }
 
-static int intel_iommu_map_range(struct iommu_domain *domain,
-				 unsigned long iova, phys_addr_t hpa,
-				 size_t size, int iommu_prot)
+static int intel_iommu_map(struct iommu_domain *domain,
+			   unsigned long iova, phys_addr_t hpa,
+			   int gfp_order, int iommu_prot)
 {
 	struct dmar_domain *dmar_domain = domain->priv;
 	u64 max_addr;
 	int addr_width;
 	int prot = 0;
+	size_t size;
 	int ret;
 
 	if (iommu_prot & IOMMU_READ)
@@ -3643,6 +3644,7 @@ static int intel_iommu_map_range(struct iommu_domain *domain,
 	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
 		prot |= DMA_PTE_SNP;
 
+	size = PAGE_SIZE << gfp_order;
 	max_addr = iova + size;
 	if (dmar_domain->max_addr < max_addr) {
 		int min_agaw;
@@ -3669,19 +3671,19 @@ static int intel_iommu_map_range(struct iommu_domain *domain,
 	return ret;
 }
 
-static void intel_iommu_unmap_range(struct iommu_domain *domain,
-				    unsigned long iova, size_t size)
+static int intel_iommu_unmap(struct iommu_domain *domain,
+			     unsigned long iova, int gfp_order)
 {
 	struct dmar_domain *dmar_domain = domain->priv;
-
-	if (!size)
-		return;
+	size_t size = PAGE_SIZE << gfp_order;
 
 	dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
 			    (iova + size - 1) >> VTD_PAGE_SHIFT);
 
 	if (dmar_domain->max_addr == iova + size)
 		dmar_domain->max_addr = iova;
+
+	return gfp_order;
 }
 
 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -3714,8 +3716,8 @@ static struct iommu_ops intel_iommu_ops = {
 	.domain_destroy = intel_iommu_domain_destroy,
 	.attach_dev	= intel_iommu_attach_device,
 	.detach_dev	= intel_iommu_detach_device,
-	.map		= intel_iommu_map_range,
-	.unmap		= intel_iommu_unmap_range,
+	.map		= intel_iommu_map,
+	.unmap		= intel_iommu_unmap,
 	.iova_to_phys	= intel_iommu_iova_to_phys,
 	.domain_has_cap	= intel_iommu_domain_has_cap,
 };
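
For other IOMMU drivers, the change reduces to the two callback signatures visible in intel_iommu_ops above: ->map() now takes a page order instead of a byte count, and ->unmap() returns the order it removed (intel_iommu_unmap() simply echoes gfp_order back). A minimal sketch of conforming callbacks for a hypothetical driver; the foo_* names and the two page-table helpers are invented for illustration and are not real kernel APIs:

#include <linux/iommu.h>
#include <linux/mm.h>

/* Hypothetical hardware helpers, stand-ins for a real driver's
 * page-table code. */
extern int foo_hw_install_ptes(void *priv, unsigned long iova,
			       phys_addr_t paddr, size_t size, int prot);
extern void foo_hw_clear_ptes(void *priv, unsigned long iova, size_t size);

static int foo_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, int gfp_order, int prot)
{
	size_t size = PAGE_SIZE << gfp_order;

	/* Install PTEs covering [iova, iova + size); a driver may use one
	 * large PTE or split the range into smaller hardware pages. */
	return foo_hw_install_ptes(domain->priv, iova, paddr, size, prot);
}

static int foo_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			   int gfp_order)
{
	size_t size = PAGE_SIZE << gfp_order;

	foo_hw_clear_ptes(domain->priv, iova, size);

	/* Report the order actually unmapped, as the VT-d callback does. */
	return gfp_order;
}

static struct iommu_ops foo_iommu_ops = {
	/* domain_init/domain_destroy/attach_dev/detach_dev and the other
	 * callbacks as in intel_iommu_ops above */
	.map	= foo_iommu_map,
	.unmap	= foo_iommu_unmap,
};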