author     Joerg Roedel <joerg.roedel@amd.com>   2009-05-19 03:52:40 -0400
committer  Joerg Roedel <joerg.roedel@amd.com>   2009-05-28 12:15:19 -0400
commit     00cd122ae5e5e7c60cce2af3c35b190d4c3f2d0d (patch)
tree       912509968a010b442f59664ac3287bae035e2b87 /arch/x86/kernel/amd_iommu.c
parent     9cabe89b99773e682538a8809abc7d4000c77083 (diff)
amd-iommu: handle exclusion ranges and unity mappings in alloc_new_range
This patch makes sure that no reserved addresses are allocated in a dma_ops domain when the aperture is increased dynamically.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r--  arch/x86/kernel/amd_iommu.c | 71
1 file changed, 60 insertions(+), 11 deletions(-)
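The patch does two things whenever alloc_new_range() grows the aperture: it re-applies the IOMMU exclusion range to the newly added range, and it walks that range with the new fetch_pte() helper so that any address which already has a present PTE (for example a unity mapping requested for a device) is marked as reserved in the allocator bitmap. As a rough illustration of that reservation idea, here is a minimal user-space sketch; struct aperture_range, RANGE_PAGES, reserve_page(), address_is_mapped() and init_new_range() are hypothetical stand-ins, not the kernel's data structures or functions.

/*
 * Hypothetical user-space sketch -- not kernel code.  It models a single
 * aperture range as a page bitmap and reserves every page that is either
 * already mapped or covered by the exclusion range, which is the effect
 * the patch has on a newly allocated aperture range.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT    12
#define PAGE_SIZE     (1UL << PAGE_SHIFT)
#define RANGE_PAGES   512             /* pages per aperture range (made up) */
#define BITS_PER_LONG (8 * sizeof(unsigned long))

struct aperture_range {
        unsigned long offset;                      /* first address covered */
        unsigned long bitmap[RANGE_PAGES / BITS_PER_LONG];
};

/* Mark one page as unavailable to the DMA address allocator. */
static void reserve_page(struct aperture_range *range, unsigned long addr)
{
        unsigned long page = (addr - range->offset) >> PAGE_SHIFT;

        if (page < RANGE_PAGES)
                range->bitmap[page / BITS_PER_LONG] |=
                        1UL << (page % BITS_PER_LONG);
}

/* Stand-in for fetch_pte(): does this address already have a mapping? */
static bool address_is_mapped(unsigned long addr)
{
        /* pretend a unity mapping was requested at 1 MB */
        return addr >= 0x100000 && addr < 0x110000;
}

/*
 * Walk a freshly added aperture range and reserve every page that is
 * either inside the exclusion range or already mapped.
 */
static void init_new_range(struct aperture_range *range,
                           unsigned long excl_start, unsigned long excl_len)
{
        unsigned long addr;
        unsigned long end = range->offset + RANGE_PAGES * PAGE_SIZE;

        for (addr = range->offset; addr < end; addr += PAGE_SIZE) {
                bool excluded = excl_len &&
                                addr >= excl_start &&
                                addr < excl_start + excl_len;

                if (excluded || address_is_mapped(addr))
                        reserve_page(range, addr);
        }
}

int main(void)
{
        struct aperture_range range = { .offset = 0 };
        unsigned long page, reserved = 0;

        /* pretend the IOMMU exclusion range is 64 KB at 1.5 MB */
        init_new_range(&range, 0x180000, 0x10000);

        for (page = 0; page < RANGE_PAGES; page++)
                if (range.bitmap[page / BITS_PER_LONG] &
                    (1UL << (page % BITS_PER_LONG)))
                        reserved++;

        printf("%lu of %d pages reserved\n", reserved, RANGE_PAGES);
        return 0;
}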
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 8ff02ee69e86..59ee1b94a7ce 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -58,6 +58,9 @@ static struct dma_ops_domain *find_protection_domain(u16 devid);
 static u64* alloc_pte(struct protection_domain *dom,
                       unsigned long address, u64
                       **pte_page, gfp_t gfp);
+static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
+                                      unsigned long start_page,
+                                      unsigned int pages);
 
 #ifdef CONFIG_AMD_IOMMU_STATS
 
@@ -621,14 +624,42 @@ static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
  */
 
 /*
+ * This function checks if there is a PTE for a given dma address. If
+ * there is one, it returns the pointer to it.
+ */
+static u64* fetch_pte(struct protection_domain *domain,
+                      unsigned long address)
+{
+        u64 *pte;
+
+        pte = &domain->pt_root[IOMMU_PTE_L2_INDEX(address)];
+
+        if (!IOMMU_PTE_PRESENT(*pte))
+                return NULL;
+
+        pte = IOMMU_PTE_PAGE(*pte);
+        pte = &pte[IOMMU_PTE_L1_INDEX(address)];
+
+        if (!IOMMU_PTE_PRESENT(*pte))
+                return NULL;
+
+        pte = IOMMU_PTE_PAGE(*pte);
+        pte = &pte[IOMMU_PTE_L0_INDEX(address)];
+
+        return pte;
+}
+
+/*
  * This function is used to add a new aperture range to an existing
  * aperture in case of dma_ops domain allocation or address allocation
  * failure.
  */
-static int alloc_new_range(struct dma_ops_domain *dma_dom,
+static int alloc_new_range(struct amd_iommu *iommu,
+                           struct dma_ops_domain *dma_dom,
                            bool populate, gfp_t gfp)
 {
         int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
+        int i;
 
         if (index >= APERTURE_MAX_RANGES)
                 return -ENOMEM;
@@ -662,6 +693,33 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom,
 
         dma_dom->aperture_size += APERTURE_RANGE_SIZE;
 
+        /* Intialize the exclusion range if necessary */
+        if (iommu->exclusion_start &&
+            iommu->exclusion_start >= dma_dom->aperture[index]->offset &&
+            iommu->exclusion_start < dma_dom->aperture_size) {
+                unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT;
+                int pages = iommu_num_pages(iommu->exclusion_start,
+                                            iommu->exclusion_length,
+                                            PAGE_SIZE);
+                dma_ops_reserve_addresses(dma_dom, startpage, pages);
+        }
+
+        /*
+         * Check for areas already mapped as present in the new aperture
+         * range and mark those pages as reserved in the allocator. Such
+         * mappings may already exist as a result of requested unity
+         * mappings for devices.
+         */
+        for (i = dma_dom->aperture[index]->offset;
+             i < dma_dom->aperture_size;
+             i += PAGE_SIZE) {
+                u64 *pte = fetch_pte(&dma_dom->domain, i);
+                if (!pte || !IOMMU_PTE_PRESENT(*pte))
+                        continue;
+
+                dma_ops_reserve_addresses(dma_dom, i << PAGE_SHIFT, 1);
+        }
+
         return 0;
 
 out_free:
@@ -911,7 +969,7 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
         dma_dom->need_flush = false;
         dma_dom->target_dev = 0xffff;
 
-        if (alloc_new_range(dma_dom, true, GFP_KERNEL))
+        if (alloc_new_range(iommu, dma_dom, true, GFP_KERNEL))
                 goto free_dma_dom;
 
         /*
@@ -921,15 +979,6 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
         dma_dom->aperture[0]->bitmap[0] = 1;
         dma_dom->next_address = 0;
 
-        /* Intialize the exclusion range if necessary */
-        if (iommu->exclusion_start &&
-            iommu->exclusion_start < dma_dom->aperture_size) {
-                unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT;
-                int pages = iommu_num_pages(iommu->exclusion_start,
-                                            iommu->exclusion_length,
-                                            PAGE_SIZE);
-                dma_ops_reserve_addresses(dma_dom, startpage, pages);
-        }
 
         return dma_dom;
 