author    Joerg Roedel <joerg.roedel@amd.com>  2009-11-23 13:08:46 -0500
committer Joerg Roedel <joerg.roedel@amd.com>  2009-11-27 08:17:01 -0500
commit    576175c2503ae9b0f930ee9a6a0abaf7ef8956ad (patch)
tree      fde988b91b94d41d848e780e64e22053dc74551f /arch
parent    680525e06ddccda8c51bdddf532cd5b7d950c411 (diff)
x86/amd-iommu: Make alloc_new_range aware of multiple IOMMUs
Since the assumption that a dma_ops domain is bound to only one IOMMU has been given up, alloc_new_range needs to be aware of this.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
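In other words, when a new aperture range is added, the exclusion-range check can no longer look at a single IOMMU; every IOMMU's exclusion range that falls inside the aperture must be reserved, because the domain may now be served by any of them. Below is a minimal, self-contained sketch of that logic, not the kernel code itself: the explicit list walk stands in for the kernel's for_each_iommu() macro, and iommu_num_pages() and dma_ops_reserve_addresses() are simplified stand-ins for the real helpers; the types and values in main() are invented for illustration.

/* Standalone sketch of the patched reservation logic (stub types). */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

struct amd_iommu {
	unsigned long exclusion_start;	/* 0 means "no exclusion range" */
	unsigned long exclusion_length;
	struct amd_iommu *next;		/* stand-in for the global IOMMU list */
};

struct dma_ops_domain {
	unsigned long aperture_offset;	/* offset of the newly added range */
	unsigned long aperture_size;	/* total aperture size after the add */
};

static struct amd_iommu *amd_iommu_list;	/* assumed global list head */

/* Simplified stand-in for the kernel's iommu_num_pages() helper */
static int iommu_num_pages(unsigned long addr, unsigned long len,
			   unsigned long page_size)
{
	/* pages spanned by [addr, addr + len), rounded up */
	return (int)(((addr & (page_size - 1)) + len + page_size - 1) /
		     page_size);
}

/* Stub: the real function marks the pages used in the aperture bitmap */
static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
				      unsigned long startpage, int pages)
{
	(void)dom;
	printf("reserve %d page(s) starting at page %lu\n", pages, startpage);
}

/* Core of the patch: loop over all IOMMUs, not just one */
static void reserve_exclusion_ranges(struct dma_ops_domain *dma_dom)
{
	struct amd_iommu *iommu;

	for (iommu = amd_iommu_list; iommu; iommu = iommu->next) {
		if (iommu->exclusion_start &&
		    iommu->exclusion_start >= dma_dom->aperture_offset &&
		    iommu->exclusion_start < dma_dom->aperture_size) {
			unsigned long startpage;
			int pages = iommu_num_pages(iommu->exclusion_start,
						    iommu->exclusion_length,
						    PAGE_SIZE);
			startpage = iommu->exclusion_start >> PAGE_SHIFT;
			dma_ops_reserve_addresses(dma_dom, startpage, pages);
		}
	}
}

int main(void)
{
	struct amd_iommu a = { 0x4000, 0x2000, NULL };	/* has an exclusion range */
	struct amd_iommu b = { 0, 0, &a };		/* no exclusion range */
	struct dma_ops_domain dom = { 0x0, 0x100000 };

	amd_iommu_list = &b;
	reserve_exclusion_ranges(&dom);	/* reserves pages 4..5 for IOMMU a */
	return 0;
}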
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/amd_iommu.c | 27
1 file changed, 15 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index da3f9d8ee395..687f617b95d7 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -788,11 +788,11 @@ static u64 *fetch_pte(struct protection_domain *domain,
  * aperture in case of dma_ops domain allocation or address allocation
  * failure.
  */
-static int alloc_new_range(struct amd_iommu *iommu,
-			   struct dma_ops_domain *dma_dom,
+static int alloc_new_range(struct dma_ops_domain *dma_dom,
 			   bool populate, gfp_t gfp)
 {
 	int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
+	struct amd_iommu *iommu;
 	int i;
 
 #ifdef CONFIG_IOMMU_STRESS
@@ -832,14 +832,17 @@ static int alloc_new_range(struct amd_iommu *iommu,
 	dma_dom->aperture_size += APERTURE_RANGE_SIZE;
 
 	/* Intialize the exclusion range if necessary */
-	if (iommu->exclusion_start &&
-	    iommu->exclusion_start >= dma_dom->aperture[index]->offset &&
-	    iommu->exclusion_start < dma_dom->aperture_size) {
-		unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT;
-		int pages = iommu_num_pages(iommu->exclusion_start,
-					    iommu->exclusion_length,
-					    PAGE_SIZE);
-		dma_ops_reserve_addresses(dma_dom, startpage, pages);
+	for_each_iommu(iommu) {
+		if (iommu->exclusion_start &&
+		    iommu->exclusion_start >= dma_dom->aperture[index]->offset
+		    && iommu->exclusion_start < dma_dom->aperture_size) {
+			unsigned long startpage;
+			int pages = iommu_num_pages(iommu->exclusion_start,
+						    iommu->exclusion_length,
+						    PAGE_SIZE);
+			startpage = iommu->exclusion_start >> PAGE_SHIFT;
+			dma_ops_reserve_addresses(dma_dom, startpage, pages);
+		}
 	}
 
 	/*
@@ -1143,7 +1146,7 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu)
 
 	add_domain_to_list(&dma_dom->domain);
 
-	if (alloc_new_range(iommu, dma_dom, true, GFP_KERNEL))
+	if (alloc_new_range(dma_dom, true, GFP_KERNEL))
 		goto free_dma_dom;
 
 	/*
@@ -1686,7 +1689,7 @@ retry:
 	 */
 	dma_dom->next_address = dma_dom->aperture_size;
 
-	if (alloc_new_range(iommu, dma_dom, false, GFP_ATOMIC))
+	if (alloc_new_range(dma_dom, false, GFP_ATOMIC))
 		goto out;
 
 	/*