author      Joerg Roedel <joerg.roedel@amd.com>    2009-05-18 10:38:55 -0400
committer   Joerg Roedel <joerg.roedel@amd.com>    2009-05-28 12:14:35 -0400
commit      9cabe89b99773e682538a8809abc7d4000c77083 (patch)
tree        f3deb81b637fce3fd84d30e565b1adcd23eae338 /arch/x86/kernel/amd_iommu.c
parent      803b8cb4d9a93b90c67aba2aab7f2c54d595b5b9 (diff)
amd-iommu: move aperture_range allocation code to separate function

This patch prepares for dynamically increasing the dma_ops domain
apertures.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
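The new function's comment below mentions use on "address allocation
failure"; that is the dynamic-growth pattern this refactoring prepares
for: when an aperture runs out of addresses, a caller can add another
range and retry. A minimal standalone model of that pattern — the names
and the retry loop here are illustrative, not code from this patch or
the kernel:

/*
 * Standalone sketch of the "grow on allocation failure" pattern this
 * patch prepares for. The aperture is modeled as a count of free pages
 * per range; everything here is illustrative only.
 */
#include <stdio.h>

#define MAX_RANGES      32
#define PAGES_PER_RANGE 4       /* tiny on purpose */

static int nr_ranges;           /* ranges allocated so far */
static int free_pages_left;     /* free pages across all ranges */

static int alloc_new_range(void)        /* models the new helper */
{
        if (nr_ranges >= MAX_RANGES)
                return -1;              /* -ENOMEM in the kernel */
        nr_ranges++;
        free_pages_left += PAGES_PER_RANGE;
        return 0;
}

static int alloc_pages(int pages)       /* models the address allocator */
{
        while (free_pages_left < pages) /* aperture exhausted: grow, retry */
                if (alloc_new_range())
                        return -1;
        free_pages_left -= pages;
        return 0;
}

int main(void)
{
        int i;

        for (i = 0; i < 6; i++)
                printf("alloc of 3 pages: %s (ranges=%d, free=%d)\n",
                       alloc_pages(3) ? "fail" : "ok",
                       nr_ranges, free_pages_left);
        return 0;
}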
Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r--   arch/x86/kernel/amd_iommu.c   95
1 file changed, 59 insertions(+), 36 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index c1a08b9119c9..8ff02ee69e86 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -620,6 +620,59 @@ static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
  * called with domain->lock held
  */
 
+/*
+ * This function is used to add a new aperture range to an existing
+ * aperture in case of dma_ops domain allocation or address allocation
+ * failure.
+ */
+static int alloc_new_range(struct dma_ops_domain *dma_dom,
+                           bool populate, gfp_t gfp)
+{
+        int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
+
+        if (index >= APERTURE_MAX_RANGES)
+                return -ENOMEM;
+
+        dma_dom->aperture[index] = kzalloc(sizeof(struct aperture_range), gfp);
+        if (!dma_dom->aperture[index])
+                return -ENOMEM;
+
+        dma_dom->aperture[index]->bitmap = (void *)get_zeroed_page(gfp);
+        if (!dma_dom->aperture[index]->bitmap)
+                goto out_free;
+
+        dma_dom->aperture[index]->offset = dma_dom->aperture_size;
+
+        if (populate) {
+                unsigned long address = dma_dom->aperture_size;
+                int i, num_ptes = APERTURE_RANGE_PAGES / 512;
+                u64 *pte, *pte_page;
+
+                for (i = 0; i < num_ptes; ++i) {
+                        pte = alloc_pte(&dma_dom->domain, address,
+                                        &pte_page, gfp);
+                        if (!pte)
+                                goto out_free;
+
+                        dma_dom->aperture[index]->pte_pages[i] = pte_page;
+
+                        address += APERTURE_RANGE_SIZE / 64;
+                }
+        }
+
+        dma_dom->aperture_size += APERTURE_RANGE_SIZE;
+
+        return 0;
+
+out_free:
+        free_page((unsigned long)dma_dom->aperture[index]->bitmap);
+
+        kfree(dma_dom->aperture[index]);
+        dma_dom->aperture[index] = NULL;
+
+        return -ENOMEM;
+}
+
 static unsigned long dma_ops_area_alloc(struct device *dev,
                                         struct dma_ops_domain *dom,
                                         unsigned int pages,
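The populate path in the new alloc_new_range() above preallocates every
PTE page for the range up front. The size arithmetic is compact enough
to mis-read, so here is a standalone check of it, assuming the values
these macros carry in amd_iommu_types.h at this point in the series
(4 KB pages, a 27-bit range shift, 512 eight-byte PTEs per page):

#include <assert.h>
#include <stdio.h>

/* Values assumed from amd_iommu_types.h at this point in the series. */
#define PAGE_SIZE               4096UL
#define APERTURE_RANGE_SHIFT    27                      /* 128 MB per range */
#define APERTURE_RANGE_SIZE     (1UL << APERTURE_RANGE_SHIFT)
#define APERTURE_RANGE_PAGES    (APERTURE_RANGE_SIZE / PAGE_SIZE)

int main(void)
{
        /* One PTE page holds 512 64-bit entries, each mapping a 4 KB page. */
        unsigned long num_ptes = APERTURE_RANGE_PAGES / 512;    /* 64 pages  */
        unsigned long stride   = APERTURE_RANGE_SIZE / 64;      /* 2 MB step */

        /* The loop stride equals the address range one PTE page covers. */
        assert(stride == 512 * PAGE_SIZE);
        /* 64 iterations x 2 MB covers the whole 128 MB range. */
        assert(num_ptes * stride == APERTURE_RANGE_SIZE);

        printf("%lu pte pages, stride %lu KB\n", num_ptes, stride >> 10);
        return 0;
}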
@@ -832,9 +885,6 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
                                                    unsigned order)
 {
         struct dma_ops_domain *dma_dom;
-        unsigned i, num_pte_pages;
-        u64 *l2_pde;
-        u64 address;
 
         /*
          * Currently the DMA aperture must be between 32 MB and 1GB in size
@@ -846,11 +896,6 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
         if (!dma_dom)
                 return NULL;
 
-        dma_dom->aperture[0] = kzalloc(sizeof(struct aperture_range),
-                                       GFP_KERNEL);
-        if (!dma_dom->aperture[0])
-                goto free_dma_dom;
-
         spin_lock_init(&dma_dom->domain.lock);
 
         dma_dom->domain.id = domain_id_alloc();
@@ -862,10 +907,13 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
         dma_dom->domain.priv = dma_dom;
         if (!dma_dom->domain.pt_root)
                 goto free_dma_dom;
-        dma_dom->aperture_size = APERTURE_RANGE_SIZE;
-        dma_dom->aperture[0]->bitmap = (void *)get_zeroed_page(GFP_KERNEL);
-        if (!dma_dom->aperture[0]->bitmap)
+
+        dma_dom->need_flush = false;
+        dma_dom->target_dev = 0xffff;
+
+        if (alloc_new_range(dma_dom, true, GFP_KERNEL))
                 goto free_dma_dom;
+
         /*
          * mark the first page as allocated so we never return 0 as
          * a valid dma-address. So we can use 0 as error value
@@ -873,9 +921,6 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
         dma_dom->aperture[0]->bitmap[0] = 1;
         dma_dom->next_address = 0;
 
-        dma_dom->need_flush = false;
-        dma_dom->target_dev = 0xffff;
-
         /* Intialize the exclusion range if necessary */
         if (iommu->exclusion_start &&
             iommu->exclusion_start < dma_dom->aperture_size) {
@@ -886,28 +931,6 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
                 dma_ops_reserve_addresses(dma_dom, startpage, pages);
         }
 
-        /*
-         * At the last step, build the page tables so we don't need to
-         * allocate page table pages in the dma_ops mapping/unmapping
-         * path for the first 128MB of dma address space.
-         */
-        num_pte_pages = dma_dom->aperture_size / (PAGE_SIZE * 512);
-
-        l2_pde = (u64 *)get_zeroed_page(GFP_KERNEL);
-        if (l2_pde == NULL)
-                goto free_dma_dom;
-
-        dma_dom->domain.pt_root[0] = IOMMU_L2_PDE(virt_to_phys(l2_pde));
-
-        for (i = 0; i < num_pte_pages; ++i) {
-                u64 **pte_page = &dma_dom->aperture[0]->pte_pages[i];
-                *pte_page = (u64 *)get_zeroed_page(GFP_KERNEL);
-                if (!*pte_page)
-                        goto free_dma_dom;
-                address = virt_to_phys(*pte_page);
-                l2_pde[i] = IOMMU_L1_PDE(address);
-        }
-
         return dma_dom;
 
 free_dma_dom:
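The final hunk removes the old open-coded prepopulation of
aperture[0]'s page tables; the alloc_new_range(dma_dom, true,
GFP_KERNEL) call added earlier takes over that work, building the same
tables through alloc_pte() rather than hand-installing an l2_pde. A
quick standalone check, under the same assumed constants as above, that
the two paths size the page tables identically:

#include <assert.h>

/* Same assumed constants as in the earlier sketch. */
#define PAGE_SIZE               4096UL
#define APERTURE_RANGE_SHIFT    27
#define APERTURE_RANGE_SIZE     (1UL << APERTURE_RANGE_SHIFT)
#define APERTURE_RANGE_PAGES    (APERTURE_RANGE_SIZE / PAGE_SIZE)

int main(void)
{
        /* Removed code: pte pages for the initial 128 MB aperture_size. */
        unsigned long old_way = APERTURE_RANGE_SIZE / (PAGE_SIZE * 512);
        /* New populate path in alloc_new_range(). */
        unsigned long new_way = APERTURE_RANGE_PAGES / 512;

        assert(old_way == new_way);     /* both 64 */
        return 0;
}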