author		Joerg Roedel <joerg.roedel@amd.com>	2010-01-15 08:41:15 -0500
committer	Joerg Roedel <joerg.roedel@amd.com>	2010-03-07 12:01:11 -0500
commit		cbb9d729f3433c9c2660b01dc52e6deb89488886
tree		e0384945ce9d235d5d3ae4e151728ce3faeb2cdf /arch/x86/kernel/amd_iommu.c
parent		fcd95807fb61e67d602610e7ff7129ed769e9fee
x86/amd-iommu: Make iommu_map_page and alloc_pte aware of page sizes
This patch changes the old map_size parameter of alloc_pte to a page_size parameter, which can be used more easily to allocate a pte for intermediate page sizes.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
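For reference, the page_size arithmetic follows from the AMD IOMMU page-table layout: 4 KiB base pages and 9 address bits per page-table level, so a power-of-two page size determines both the level a mapping lives on and how many consecutive PTEs must be written. The user-space sketch below only illustrates that arithmetic; page_size_level() and page_size_pte_count() are illustrative stand-ins for the PAGE_SIZE_LEVEL and PAGE_SIZE_PTE_COUNT helpers used in the diff, whose real definitions live in the AMD IOMMU headers and are not part of this patch.

/* Sketch only: illustrates the arithmetic behind the page_size helpers
 * used by alloc_pte() and iommu_map_page() below. The real macros are
 * defined in the AMD IOMMU headers and may differ in detail.
 */
#include <stdio.h>

/* Page-table level a naturally aligned mapping of 'page_size' bytes
 * ends on: level 0 holds 4 KiB entries, each higher level covers
 * 9 more address bits.
 */
static unsigned long page_size_level(unsigned long page_size)
{
	return (__builtin_ctzl(page_size) - 12) / 9;
}

/* Number of consecutive PTEs that have to be written for 'page_size'
 * at that level (more than one for sizes between two level boundaries).
 */
static unsigned long page_size_pte_count(unsigned long page_size)
{
	return 1UL << ((__builtin_ctzl(page_size) - 12) % 9);
}

int main(void)
{
	/* 4 KiB: level 0, one PTE.  2 MiB: level 1, one PTE.
	 * 32 KiB: level 0, eight consecutive PTEs, which the patch
	 * writes with the PM_LEVEL_ENC(7) "large page" encoding.
	 */
	unsigned long sizes[] = { 0x1000UL, 0x8000UL, 0x200000UL };

	for (int i = 0; i < 3; ++i)
		printf("size %#lx -> level %lu, %lu PTE(s)\n",
		       sizes[i], page_size_level(sizes[i]),
		       page_size_pte_count(sizes[i]));

	return 0;
}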
Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r--	arch/x86/kernel/amd_iommu.c	53
1 file changed, 33 insertions(+), 20 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 59cae7c4df54..41700314f3e0 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -730,18 +730,22 @@ static bool increase_address_space(struct protection_domain *domain,
 
 static u64 *alloc_pte(struct protection_domain *domain,
 		      unsigned long address,
-		      int end_lvl,
+		      unsigned long page_size,
 		      u64 **pte_page,
 		      gfp_t gfp)
 {
+	int level, end_lvl;
 	u64 *pte, *page;
-	int level;
+
+	BUG_ON(!is_power_of_2(page_size));
 
 	while (address > PM_LEVEL_SIZE(domain->mode))
 		increase_address_space(domain, gfp);
 
-	level =  domain->mode - 1;
-	pte   = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
+	level   = domain->mode - 1;
+	pte     = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
+	address = PAGE_SIZE_ALIGN(address, page_size);
+	end_lvl = PAGE_SIZE_LEVEL(page_size);
 
 	while (level > end_lvl) {
 		if (!IOMMU_PTE_PRESENT(*pte)) {
@@ -751,6 +755,10 @@ static u64 *alloc_pte(struct protection_domain *domain,
 			*pte = PM_LEVEL_PDE(level, virt_to_phys(page));
 		}
 
+		/* No level skipping support yet */
+		if (PM_PTE_LEVEL(*pte) != level)
+			return NULL;
+
 		level -= 1;
 
 		pte = IOMMU_PTE_PAGE(*pte);
@@ -806,31 +814,36 @@ static int iommu_map_page(struct protection_domain *dom,
 			  unsigned long bus_addr,
 			  unsigned long phys_addr,
 			  int prot,
-			  int map_size)
+			  unsigned long page_size)
 {
 	u64 __pte, *pte;
-
-	bus_addr  = PAGE_ALIGN(bus_addr);
-	phys_addr = PAGE_ALIGN(phys_addr);
-
-	BUG_ON(!PM_ALIGNED(map_size, bus_addr));
-	BUG_ON(!PM_ALIGNED(map_size, phys_addr));
+	int i, count;
 
 	if (!(prot & IOMMU_PROT_MASK))
 		return -EINVAL;
 
-	pte = alloc_pte(dom, bus_addr, map_size, NULL, GFP_KERNEL);
+	bus_addr  = PAGE_ALIGN(bus_addr);
+	phys_addr = PAGE_ALIGN(phys_addr);
+	count     = PAGE_SIZE_PTE_COUNT(page_size);
+	pte       = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL);
+
+	for (i = 0; i < count; ++i)
+		if (IOMMU_PTE_PRESENT(pte[i]))
+			return -EBUSY;
 
-	if (IOMMU_PTE_PRESENT(*pte))
-		return -EBUSY;
+	if (page_size > PAGE_SIZE) {
+		__pte = PAGE_SIZE_PTE(phys_addr, page_size);
+		__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_P | IOMMU_PTE_FC;
+	} else
+		__pte = phys_addr | IOMMU_PTE_P | IOMMU_PTE_FC;
 
-	__pte = phys_addr | IOMMU_PTE_P;
 	if (prot & IOMMU_PROT_IR)
 		__pte |= IOMMU_PTE_IR;
 	if (prot & IOMMU_PROT_IW)
 		__pte |= IOMMU_PTE_IW;
 
-	*pte = __pte;
+	for (i = 0; i < count; ++i)
+		pte[i] = __pte;
 
 	update_domain(dom);
 
@@ -877,7 +890,7 @@ static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
 	for (addr = e->address_start; addr < e->address_end;
 	     addr += PAGE_SIZE) {
 		ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot,
-				     PM_MAP_4k);
+				     PAGE_SIZE);
 		if (ret)
 			return ret;
 		/*
@@ -1005,7 +1018,7 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom,
 	u64 *pte, *pte_page;
 
 	for (i = 0; i < num_ptes; ++i) {
-		pte = alloc_pte(&dma_dom->domain, address, PM_MAP_4k,
+		pte = alloc_pte(&dma_dom->domain, address, PAGE_SIZE,
 				&pte_page, gfp);
 		if (!pte)
 			goto out_free;
@@ -1711,7 +1724,7 @@ static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
 
 	pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
 	if (!pte) {
-		pte = alloc_pte(&dom->domain, address, PM_MAP_4k, &pte_page,
+		pte = alloc_pte(&dom->domain, address, PAGE_SIZE, &pte_page,
 				GFP_ATOMIC);
 		aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page;
 	} else
@@ -2457,7 +2470,7 @@ static int amd_iommu_map_range(struct iommu_domain *dom,
 	paddr &= PAGE_MASK;
 
 	for (i = 0; i < npages; ++i) {
-		ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k);
+		ret = iommu_map_page(domain, iova, paddr, prot, PAGE_SIZE);
 		if (ret)
 			return ret;
 