author		Joerg Roedel <joerg.roedel@amd.com>	2009-05-15 06:30:05 -0400
committer	Joerg Roedel <joerg.roedel@amd.com>	2009-05-28 12:14:15 -0400
commit		384de72910a7bf96a02a6d8023fe9e16d872beb2
tree		623a37e8e37180b505e1d997ee83c93528344d55
parent		53812c115cda1f660b286c939669154a56976f6b
amd-iommu: make address allocator aware of multiple aperture ranges
This patch changes the AMD IOMMU address allocator to allow up to 32
aperture ranges per dma_ops domain.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
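The hunks below use struct aperture_range and a family of APERTURE_* helpers
that this patch introduces in amd_iommu_types.h, outside the file covered by
the diffstat. As a reading aid, here is a minimal sketch of the shape those
definitions take, inferred from how the diff uses them; the exact constants,
field order, and comments are assumptions, not the verbatim header:

	/* Sketch (assumed, inferred from usage below): the aperture is split
	 * into 128 MB ranges, each with its own allocation bitmap and its
	 * own cache of leaf PTE pages.
	 */
	#define APERTURE_RANGE_SHIFT	27	/* 128 MB per range */
	#define APERTURE_RANGE_SIZE	(1ULL << APERTURE_RANGE_SHIFT)
	#define APERTURE_RANGE_PAGES	(APERTURE_RANGE_SIZE >> PAGE_SHIFT)
	#define APERTURE_MAX_RANGES	32	/* 32 * 128 MB = 4 GB DMA space */

	#define APERTURE_RANGE_INDEX(a)	((a) >> APERTURE_RANGE_SHIFT)
	#define APERTURE_PAGE_INDEX(a)	(((a) >> 21) & 0x3fULL)

	struct aperture_range {
		/* address allocation bitmap for this range */
		unsigned long *bitmap;

		/* leaf PTE pages, so a PTE is found without a page-table
		 * walk; one page covers 2 MB, hence 64 per 128 MB range */
		u64 *pte_pages[64];

		/* offset of this range in the domain's DMA address space */
		unsigned long offset;
	};

With this layout, the new dma_ops_area_alloc() below selects a range via its
index and searches only that range's private bitmap.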
Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r--	arch/x86/kernel/amd_iommu.c	138
1 file changed, 101 insertions(+), 37 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index a467addb44b..794163ae97b 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -578,7 +578,7 @@ static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
 		 */
 		if (addr < dma_dom->aperture_size)
 			__set_bit(addr >> PAGE_SHIFT,
-				  dma_dom->aperture.bitmap);
+				  dma_dom->aperture[0]->bitmap);
 	}
 
 	return 0;
@@ -615,43 +615,74 @@ static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
  ****************************************************************************/
 
 /*
- * The address allocator core function.
+ * The address allocator core functions.
  *
  * called with domain->lock held
  */
+
+static unsigned long dma_ops_area_alloc(struct device *dev,
+					struct dma_ops_domain *dom,
+					unsigned int pages,
+					unsigned long align_mask,
+					u64 dma_mask,
+					unsigned long start)
+{
+	unsigned long next_bit = dom->next_bit % APERTURE_RANGE_PAGES;
+	int max_index = dom->aperture_size >> APERTURE_RANGE_SHIFT;
+	int i = start >> APERTURE_RANGE_SHIFT;
+	unsigned long boundary_size;
+	unsigned long address = -1;
+	unsigned long limit;
+
+	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+			PAGE_SIZE) >> PAGE_SHIFT;
+
+	for (;i < max_index; ++i) {
+		unsigned long offset = dom->aperture[i]->offset >> PAGE_SHIFT;
+
+		if (dom->aperture[i]->offset >= dma_mask)
+			break;
+
+		limit = iommu_device_max_index(APERTURE_RANGE_PAGES, offset,
+					       dma_mask >> PAGE_SHIFT);
+
+		address = iommu_area_alloc(dom->aperture[i]->bitmap,
+					   limit, next_bit, pages, 0,
+					   boundary_size, align_mask);
+		if (address != -1) {
+			address = dom->aperture[i]->offset +
+				  (address << PAGE_SHIFT);
+			dom->next_bit = (address >> PAGE_SHIFT) + pages;
+			break;
+		}
+
+		next_bit = 0;
+	}
+
+	return address;
+}
+
 static unsigned long dma_ops_alloc_addresses(struct device *dev,
 					     struct dma_ops_domain *dom,
 					     unsigned int pages,
 					     unsigned long align_mask,
 					     u64 dma_mask)
 {
-	unsigned long limit;
 	unsigned long address;
-	unsigned long boundary_size;
+	unsigned long start = dom->next_bit << PAGE_SHIFT;
 
-	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
-			PAGE_SIZE) >> PAGE_SHIFT;
-	limit = iommu_device_max_index(dom->aperture_size >> PAGE_SHIFT, 0,
-				       dma_mask >> PAGE_SHIFT);
 
-	if (dom->next_bit >= limit) {
-		dom->next_bit = 0;
-		dom->need_flush = true;
-	}
+	address = dma_ops_area_alloc(dev, dom, pages, align_mask,
+				     dma_mask, start);
 
-	address = iommu_area_alloc(dom->aperture.bitmap, limit, dom->next_bit,
-				   pages, 0 , boundary_size, align_mask);
 	if (address == -1) {
-		address = iommu_area_alloc(dom->aperture.bitmap, limit, 0,
-					   pages, 0, boundary_size,
-					   align_mask);
+		dom->next_bit = 0;
+		address = dma_ops_area_alloc(dev, dom, pages, align_mask,
+					     dma_mask, 0);
 		dom->need_flush = true;
 	}
 
-	if (likely(address != -1)) {
-		dom->next_bit = address + pages;
-		address <<= PAGE_SHIFT;
-	} else
+	if (unlikely(address == -1))
 		address = bad_dma_address;
 
 	WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);
@@ -668,11 +699,17 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
 				 unsigned long address,
 				 unsigned int pages)
 {
-	address >>= PAGE_SHIFT;
-	iommu_area_free(dom->aperture.bitmap, address, pages);
+	unsigned i = address >> APERTURE_RANGE_SHIFT;
+	struct aperture_range *range = dom->aperture[i];
+
+	BUG_ON(i >= APERTURE_MAX_RANGES || range == NULL);
 
-	if (address >= dom->next_bit)
+	if ((address >> PAGE_SHIFT) >= dom->next_bit)
 		dom->need_flush = true;
+
+	address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;
+	iommu_area_free(range->bitmap, address, pages);
+
 }
 
 /****************************************************************************
@@ -720,12 +757,16 @@ static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
 				      unsigned long start_page,
 				      unsigned int pages)
 {
-	unsigned int last_page = dom->aperture_size >> PAGE_SHIFT;
+	unsigned int i, last_page = dom->aperture_size >> PAGE_SHIFT;
 
 	if (start_page + pages > last_page)
 		pages = last_page - start_page;
 
-	iommu_area_reserve(dom->aperture.bitmap, start_page, pages);
+	for (i = start_page; i < start_page + pages; ++i) {
+		int index = i / APERTURE_RANGE_PAGES;
+		int page  = i % APERTURE_RANGE_PAGES;
+		__set_bit(page, dom->aperture[index]->bitmap);
+	}
 }
 
 static void free_pagetable(struct protection_domain *domain)
@@ -764,12 +805,19 @@ static void free_pagetable(struct protection_domain *domain)
  */
 static void dma_ops_domain_free(struct dma_ops_domain *dom)
 {
+	int i;
+
 	if (!dom)
 		return;
 
 	free_pagetable(&dom->domain);
 
-	free_page((unsigned long)dom->aperture.bitmap);
+	for (i = 0; i < APERTURE_MAX_RANGES; ++i) {
+		if (!dom->aperture[i])
+			continue;
+		free_page((unsigned long)dom->aperture[i]->bitmap);
+		kfree(dom->aperture[i]);
+	}
 
 	kfree(dom);
 }
@@ -797,6 +845,11 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
 	if (!dma_dom)
 		return NULL;
 
+	dma_dom->aperture[0] = kzalloc(sizeof(struct aperture_range),
+				       GFP_KERNEL);
+	if (!dma_dom->aperture[0])
+		goto free_dma_dom;
+
 	spin_lock_init(&dma_dom->domain.lock);
 
 	dma_dom->domain.id = domain_id_alloc();
@@ -809,14 +862,14 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
 	if (!dma_dom->domain.pt_root)
 		goto free_dma_dom;
 	dma_dom->aperture_size = APERTURE_RANGE_SIZE;
-	dma_dom->aperture.bitmap = (void *)get_zeroed_page(GFP_KERNEL);
-	if (!dma_dom->aperture.bitmap)
+	dma_dom->aperture[0]->bitmap = (void *)get_zeroed_page(GFP_KERNEL);
+	if (!dma_dom->aperture[0]->bitmap)
 		goto free_dma_dom;
 	/*
 	 * mark the first page as allocated so we never return 0 as
 	 * a valid dma-address. So we can use 0 as error value
 	 */
-	dma_dom->aperture.bitmap[0] = 1;
+	dma_dom->aperture[0]->bitmap[0] = 1;
 	dma_dom->next_bit = 0;
 
 	dma_dom->need_flush = false;
@@ -846,7 +899,7 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
 	dma_dom->domain.pt_root[0] = IOMMU_L2_PDE(virt_to_phys(l2_pde));
 
 	for (i = 0; i < num_pte_pages; ++i) {
-		u64 **pte_page = &dma_dom->aperture.pte_pages[i];
+		u64 **pte_page = &dma_dom->aperture[0]->pte_pages[i];
 		*pte_page = (u64 *)get_zeroed_page(GFP_KERNEL);
 		if (!*pte_page)
 			goto free_dma_dom;
@@ -1164,14 +1217,19 @@ static u64* alloc_pte(struct protection_domain *dom,
 static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
 			    unsigned long address)
 {
-	struct aperture_range *aperture = &dom->aperture;
+	struct aperture_range *aperture;
 	u64 *pte, *pte_page;
 
-	pte = aperture->pte_pages[IOMMU_PTE_L1_INDEX(address)];
+	aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
+	if (!aperture)
+		return NULL;
+
+	pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
 	if (!pte) {
 		pte = alloc_pte(&dom->domain, address, &pte_page, GFP_ATOMIC);
-		aperture->pte_pages[IOMMU_PTE_L1_INDEX(address)] = pte_page;
-	}
+		aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page;
+	} else
+		pte += IOMMU_PTE_L0_INDEX(address);
 
 	return pte;
 }
@@ -1219,14 +1277,20 @@ static void dma_ops_domain_unmap(struct amd_iommu *iommu,
 			       struct dma_ops_domain *dom,
 			       unsigned long address)
 {
+	struct aperture_range *aperture;
 	u64 *pte;
 
 	if (address >= dom->aperture_size)
 		return;
 
-	WARN_ON(address & ~PAGE_MASK || address >= dom->aperture_size);
+	aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
+	if (!aperture)
+		return;
+
+	pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
+	if (!pte)
+		return;
 
-	pte = dom->aperture.pte_pages[IOMMU_PTE_L1_INDEX(address)];
 	pte += IOMMU_PTE_L0_INDEX(address);
 
 	WARN_ON(!*pte);
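
As a worked example of the index arithmetic introduced in
dma_ops_free_addresses() and dma_ops_get_pte() above: a DMA address selects a
128 MB range, and the remainder within that range selects a bit in that
range's private bitmap. A hypothetical, self-contained userspace sketch (the
constants mirror the x86 values assumed above; this is illustration, not
kernel code):

	#include <stdio.h>

	#define PAGE_SHIFT		12
	#define APERTURE_RANGE_SHIFT	27	/* 128 MB, as assumed above */
	#define APERTURE_RANGE_SIZE	(1UL << APERTURE_RANGE_SHIFT)

	int main(void)
	{
		/* 128 MB + 5 pages: falls into the second aperture range */
		unsigned long address = 0x08005000UL;

		unsigned long range = address >> APERTURE_RANGE_SHIFT;
		unsigned long bit   = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;

		printf("range %lu, bitmap bit %lu\n", range, bit);
		/* prints: range 1, bitmap bit 5 */
		return 0;
	}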