author     Joerg Roedel <joerg.roedel@amd.com>    2009-05-12 04:56:44 -0400
committer  Joerg Roedel <joerg.roedel@amd.com>    2009-05-28 12:12:52 -0400
commit     c3239567a20e90e3026ac5453d5267506ef7b030
tree       403aea58ec7d0e4d0f1233c4b4d472885ffe738d /arch/x86/kernel/amd_iommu.c
parent     41fb454ebe6024f5c1e3b3cbc0abc0da762e7b51
amd-iommu: introduce aperture_range structure
This is a preparation for the extended address allocator.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')

 arch/x86/kernel/amd_iommu.c | 46 +++++++++++++++++++++-------------------------
 1 file changed, 21 insertions(+), 25 deletions(-)
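Note: the aperture field and the APERTURE_RANGE_SIZE constant used below come from a companion change to amd_iommu_types.h, which this diffstat (limited to amd_iommu.c) does not show. Inferred from how the fields are used in this patch, the header change is presumably along these lines (a sketch, not the verbatim definition):

    /*
     * Assumed companion amd_iommu_types.h change: one aperture range
     * covers 128 MB of DMA address space. A single zeroed page used as
     * the allocation bitmap holds PAGE_SIZE * 8 = 32768 bits, i.e.
     * 32768 4 KB pages = 128 MB, which matches the get_zeroed_page()
     * call in dma_ops_domain_alloc() below.
     */
    #define APERTURE_RANGE_SHIFT	27	/* 128 MB */
    #define APERTURE_RANGE_SIZE	(1ULL << APERTURE_RANGE_SHIFT)

    struct aperture_range {
    	/* address allocation bitmap, one page long */
    	unsigned long *bitmap;

    	/*
    	 * Leaf page-table pages for this range: each 512-entry PTE
    	 * page maps 2 MB, so 128 MB / 2 MB = 64 pages. The same
    	 * arithmetic gives num_pte_pages below:
    	 * aperture_size / (PAGE_SIZE * 512) = 64.
    	 */
    	u64 *pte_pages[64];
    };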
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index a97db99dad52..62acd09cd19f 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -595,7 +595,8 @@ static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
 		 * as allocated in the aperture
 		 */
 		if (addr < dma_dom->aperture_size)
-			__set_bit(addr >> PAGE_SHIFT, dma_dom->bitmap);
+			__set_bit(addr >> PAGE_SHIFT,
+				  dma_dom->aperture.bitmap);
 	}
 
 	return 0;
@@ -656,11 +657,12 @@ static unsigned long dma_ops_alloc_addresses(struct device *dev,
 		dom->need_flush = true;
 	}
 
-	address = iommu_area_alloc(dom->bitmap, limit, dom->next_bit, pages,
-				   0 , boundary_size, align_mask);
+	address = iommu_area_alloc(dom->aperture.bitmap, limit, dom->next_bit,
+				   pages, 0 , boundary_size, align_mask);
 	if (address == -1) {
-		address = iommu_area_alloc(dom->bitmap, limit, 0, pages,
-					   0, boundary_size, align_mask);
+		address = iommu_area_alloc(dom->aperture.bitmap, limit, 0,
+					   pages, 0, boundary_size,
+					   align_mask);
 		dom->need_flush = true;
 	}
 
@@ -685,7 +687,7 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
 			       unsigned int pages)
 {
 	address >>= PAGE_SHIFT;
-	iommu_area_free(dom->bitmap, address, pages);
+	iommu_area_free(dom->aperture.bitmap, address, pages);
 
 	if (address >= dom->next_bit)
 		dom->need_flush = true;
@@ -741,7 +743,7 @@ static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
 	if (start_page + pages > last_page)
 		pages = last_page - start_page;
 
-	iommu_area_reserve(dom->bitmap, start_page, pages);
+	iommu_area_reserve(dom->aperture.bitmap, start_page, pages);
 }
 
 static void free_pagetable(struct protection_domain *domain)
@@ -785,9 +787,7 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom)
 
 	free_pagetable(&dom->domain);
 
-	kfree(dom->pte_pages);
-
-	kfree(dom->bitmap);
+	free_page((unsigned long)dom->aperture.bitmap);
 
 	kfree(dom);
 }
@@ -826,16 +826,15 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
 	dma_dom->domain.priv = dma_dom;
 	if (!dma_dom->domain.pt_root)
 		goto free_dma_dom;
-	dma_dom->aperture_size = (1ULL << order);
-	dma_dom->bitmap = kzalloc(dma_dom->aperture_size / (PAGE_SIZE * 8),
-				  GFP_KERNEL);
-	if (!dma_dom->bitmap)
+	dma_dom->aperture_size = APERTURE_RANGE_SIZE;
+	dma_dom->aperture.bitmap = (void *)get_zeroed_page(GFP_KERNEL);
+	if (!dma_dom->aperture.bitmap)
 		goto free_dma_dom;
 	/*
 	 * mark the first page as allocated so we never return 0 as
 	 * a valid dma-address. So we can use 0 as error value
 	 */
-	dma_dom->bitmap[0] = 1;
+	dma_dom->aperture.bitmap[0] = 1;
 	dma_dom->next_bit = 0;
 
 	dma_dom->need_flush = false;
@@ -854,13 +853,9 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
 	/*
 	 * At the last step, build the page tables so we don't need to
 	 * allocate page table pages in the dma_ops mapping/unmapping
-	 * path.
+	 * path for the first 128MB of dma address space.
 	 */
 	num_pte_pages = dma_dom->aperture_size / (PAGE_SIZE * 512);
-	dma_dom->pte_pages = kzalloc(num_pte_pages * sizeof(void *),
-				     GFP_KERNEL);
-	if (!dma_dom->pte_pages)
-		goto free_dma_dom;
 
 	l2_pde = (u64 *)get_zeroed_page(GFP_KERNEL);
 	if (l2_pde == NULL)
@@ -869,10 +864,11 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
 	dma_dom->domain.pt_root[0] = IOMMU_L2_PDE(virt_to_phys(l2_pde));
 
 	for (i = 0; i < num_pte_pages; ++i) {
-		dma_dom->pte_pages[i] = (u64 *)get_zeroed_page(GFP_KERNEL);
-		if (!dma_dom->pte_pages[i])
+		u64 **pte_page = &dma_dom->aperture.pte_pages[i];
+		*pte_page = (u64 *)get_zeroed_page(GFP_KERNEL);
+		if (!*pte_page)
 			goto free_dma_dom;
-		address = virt_to_phys(dma_dom->pte_pages[i]);
+		address = virt_to_phys(*pte_page);
 		l2_pde[i] = IOMMU_L1_PDE(address);
 	}
 
@@ -1159,7 +1155,7 @@ static dma_addr_t dma_ops_domain_map(struct amd_iommu *iommu,
 
 	paddr &= PAGE_MASK;
 
-	pte  = dom->pte_pages[IOMMU_PTE_L1_INDEX(address)];
+	pte  = dom->aperture.pte_pages[IOMMU_PTE_L1_INDEX(address)];
 	pte += IOMMU_PTE_L0_INDEX(address);
 
 	__pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;
@@ -1192,7 +1188,7 @@ static void dma_ops_domain_unmap(struct amd_iommu *iommu,
 
 	WARN_ON(address & ~PAGE_MASK || address >= dom->aperture_size);
 
-	pte  = dom->pte_pages[IOMMU_PTE_L1_INDEX(address)];
+	pte  = dom->aperture.pte_pages[IOMMU_PTE_L1_INDEX(address)];
 	pte += IOMMU_PTE_L0_INDEX(address);
 
 	WARN_ON(!*pte);