author    Joerg Roedel <joerg.roedel@amd.com>    2009-05-12 06:02:46 -0400
committer Joerg Roedel <joerg.roedel@amd.com>    2009-05-28 12:13:20 -0400
commit    8bda3092bcfa68f786d94549ae026e8db1eff041
tree      5ce545d406c066241c1f7543fa4c65c89575cacf /arch/x86/kernel/amd_iommu.c
parent    c3239567a20e90e3026ac5453d5267506ef7b030
amd-iommu: move page table allocation code to separate function
This patch makes page table allocation usable for dma_ops code.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
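Before the hunks, the shape of the change: the three-level page-table walk is lifted out of iommu_map_page() into a new helper, alloc_pte(), which takes a gfp_t so both mapping paths can share it. A minimal sketch of the resulting call pattern, condensed from the hunks below (identifiers are the patch's own; error handling trimmed):

        /* Driver-facing map path: process context, may sleep. */
        pte = alloc_pte(dom, bus_addr, NULL, GFP_KERNEL);

        /* dma_ops fast path: may run in atomic context, so it passes
         * GFP_ATOMIC, and the returned leaf page is cached in
         * aperture->pte_pages[] for later lookups. */
        pte = alloc_pte(&dom->domain, address, &pte_page, GFP_ATOMIC);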
Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r--  arch/x86/kernel/amd_iommu.c  |  86
1 file changed, 61 insertions(+), 25 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 62acd09cd19f..ded79f7747c5 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -55,7 +55,9 @@ struct iommu_cmd {
 static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
                              struct unity_map_entry *e);
 static struct dma_ops_domain *find_protection_domain(u16 devid);
-
+static u64* alloc_pte(struct protection_domain *dom,
+                       unsigned long address, u64
+                       **pte_page, gfp_t gfp);
 
 #ifdef CONFIG_AMD_IOMMU_STATS
 
@@ -468,7 +470,7 @@ static int iommu_map_page(struct protection_domain *dom,
                           unsigned long phys_addr,
                           int prot)
 {
-       u64 __pte, *pte, *page;
+       u64 __pte, *pte;
 
        bus_addr  = PAGE_ALIGN(bus_addr);
        phys_addr = PAGE_ALIGN(phys_addr);
@@ -477,27 +479,7 @@ static int iommu_map_page(struct protection_domain *dom,
        if (bus_addr > IOMMU_MAP_SIZE_L3 || !(prot & IOMMU_PROT_MASK))
                return -EINVAL;
 
-       pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(bus_addr)];
-
-       if (!IOMMU_PTE_PRESENT(*pte)) {
-               page = (u64 *)get_zeroed_page(GFP_KERNEL);
-               if (!page)
-                       return -ENOMEM;
-               *pte = IOMMU_L2_PDE(virt_to_phys(page));
-       }
-
-       pte = IOMMU_PTE_PAGE(*pte);
-       pte = &pte[IOMMU_PTE_L1_INDEX(bus_addr)];
-
-       if (!IOMMU_PTE_PRESENT(*pte)) {
-               page = (u64 *)get_zeroed_page(GFP_KERNEL);
-               if (!page)
-                       return -ENOMEM;
-               *pte = IOMMU_L1_PDE(virt_to_phys(page));
-       }
-
-       pte = IOMMU_PTE_PAGE(*pte);
-       pte = &pte[IOMMU_PTE_L0_INDEX(bus_addr)];
+       pte = alloc_pte(dom, bus_addr, NULL, GFP_KERNEL);
 
        if (IOMMU_PTE_PRESENT(*pte))
                return -EBUSY;
@@ -1140,6 +1122,61 @@ static int get_device_resources(struct device *dev,
 }
 
 /*
+ * If the pte_page is not yet allocated this function is called
+ */
+static u64* alloc_pte(struct protection_domain *dom,
+                     unsigned long address, u64 **pte_page, gfp_t gfp)
+{
+       u64 *pte, *page;
+
+       pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(address)];
+
+       if (!IOMMU_PTE_PRESENT(*pte)) {
+               page = (u64 *)get_zeroed_page(gfp);
+               if (!page)
+                       return NULL;
+               *pte = IOMMU_L2_PDE(virt_to_phys(page));
+       }
+
+       pte = IOMMU_PTE_PAGE(*pte);
+       pte = &pte[IOMMU_PTE_L1_INDEX(address)];
+
+       if (!IOMMU_PTE_PRESENT(*pte)) {
+               page = (u64 *)get_zeroed_page(gfp);
+               if (!page)
+                       return NULL;
+               *pte = IOMMU_L1_PDE(virt_to_phys(page));
+       }
+
+       pte = IOMMU_PTE_PAGE(*pte);
+
+       if (pte_page)
+               *pte_page = pte;
+
+       pte = &pte[IOMMU_PTE_L0_INDEX(address)];
+
+       return pte;
+}
+
+/*
+ * This function fetches the PTE for a given address in the aperture
+ */
+static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
+                           unsigned long address)
+{
+       struct aperture_range *aperture = &dom->aperture;
+       u64 *pte, *pte_page;
+
+       pte = aperture->pte_pages[IOMMU_PTE_L1_INDEX(address)];
+       if (!pte) {
+               pte = alloc_pte(&dom->domain, address, &pte_page, GFP_ATOMIC);
+               aperture->pte_pages[IOMMU_PTE_L1_INDEX(address)] = pte_page;
+       }
+
+       return pte;
+}
+
+/*
  * This is the generic map function. It maps one 4kb page at paddr to
  * the given address in the DMA address space for the domain.
  */
@@ -1155,8 +1192,7 @@ static dma_addr_t dma_ops_domain_map(struct amd_iommu *iommu,
 
        paddr &= PAGE_MASK;
 
-       pte  = dom->aperture.pte_pages[IOMMU_PTE_L1_INDEX(address)];
-       pte += IOMMU_PTE_L0_INDEX(address);
+       pte = dma_ops_get_pte(dom, address);
 
        __pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;
 
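The helper itself is a lazily populated multi-level table walk. For readers who want to poke at the technique outside the kernel, here is a self-contained userspace mock (hypothetical names throughout; the 512-entry levels and the 12/21/30-bit shifts assume the AMD IOMMU's 4 KiB-page, 9-bits-per-level layout and are illustrative, not taken from this file):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ENTRIES   512                     /* assumed: 9 index bits per level */
#define IDX(a, s) (((a) >> (s)) & 0x1ff)

/*
 * Mock of alloc_pte()'s lazy walk: each level is a zeroed array of
 * ENTRIES slots, and a missing lower level is allocated on first
 * touch, much as the kernel code does with get_zeroed_page().
 */
static uint64_t *mock_alloc_pte(void **root, uint64_t addr)
{
        void **l1;
        uint64_t *l0;

        l1 = root[IDX(addr, 30)];
        if (!l1) {
                l1 = calloc(ENTRIES, sizeof(void *));
                if (!l1)
                        return NULL;
                root[IDX(addr, 30)] = l1;
        }

        l0 = l1[IDX(addr, 21)];
        if (!l0) {
                l0 = calloc(ENTRIES, sizeof(uint64_t));
                if (!l0)
                        return NULL;
                l1[IDX(addr, 21)] = l0;
        }

        return &l0[IDX(addr, 12)];        /* pointer to the leaf PTE slot */
}

int main(void)
{
        void **root = calloc(ENTRIES, sizeof(void *));
        uint64_t *pte;

        if (!root)
                return 1;
        pte = mock_alloc_pte(root, 0x12345000ULL);
        if (!pte)
                return 1;
        *pte = 0x12345000ULL | 1;         /* phys addr | present bit */
        printf("pte slot holds %#llx\n", (unsigned long long)*pte);
        return 0;
}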