aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kernel/amd_iommu.c
diff options
context:
space:
mode:
authorJoerg Roedel <joerg.roedel@amd.com>2009-09-02 10:00:23 -0400
committerJoerg Roedel <joerg.roedel@amd.com>2009-09-03 10:03:45 -0400
commit04bfdd8406099fca2e6b8844748c4d6c5eba8c8d (patch)
treebab300413ef4c176ddc4050a50365d2e95160845 /arch/x86/kernel/amd_iommu.c
parent407d733e30a97daf5ea6f9eb5f9ebbd42a0a9ef2 (diff)
x86/amd-iommu: Flush domains if address space size was increased
This patch introduces the update_domain function which propagates the larger address space of a protection domain to the device table and flushes all relevant DTEs and the domain TLB. Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r--arch/x86/kernel/amd_iommu.c32
1 file changed, 32 insertions, 0 deletions
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 0fab1f1d135e..5eab6a84b9cc 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -63,6 +63,7 @@ static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
63 unsigned int pages); 63 unsigned int pages);
64static u64 *fetch_pte(struct protection_domain *domain, 64static u64 *fetch_pte(struct protection_domain *domain,
65 unsigned long address); 65 unsigned long address);
66static void update_domain(struct protection_domain *domain);
66 67
67#ifndef BUS_NOTIFY_UNBOUND_DRIVER 68#ifndef BUS_NOTIFY_UNBOUND_DRIVER
68#define BUS_NOTIFY_UNBOUND_DRIVER 0x0005 69#define BUS_NOTIFY_UNBOUND_DRIVER 0x0005
@@ -546,6 +547,8 @@ static int iommu_map_page(struct protection_domain *dom,
546 547
547 *pte = __pte; 548 *pte = __pte;
548 549
550 update_domain(dom);
551
549 return 0; 552 return 0;
550} 553}
551 554
@@ -762,9 +765,13 @@ static int alloc_new_range(struct amd_iommu *iommu,
762 dma_ops_reserve_addresses(dma_dom, i << PAGE_SHIFT, 1); 765 dma_ops_reserve_addresses(dma_dom, i << PAGE_SHIFT, 1);
763 } 766 }
764 767
768 update_domain(&dma_dom->domain);
769
765 return 0; 770 return 0;
766 771
767out_free: 772out_free:
773 update_domain(&dma_dom->domain);
774
768 free_page((unsigned long)dma_dom->aperture[index]->bitmap); 775 free_page((unsigned long)dma_dom->aperture[index]->bitmap);
769 776
770 kfree(dma_dom->aperture[index]); 777 kfree(dma_dom->aperture[index]);
@@ -1294,6 +1301,29 @@ static int get_device_resources(struct device *dev,
1294 return 1; 1301 return 1;
1295} 1302}
1296 1303
1304static void update_device_table(struct protection_domain *domain)
1305{
1306 int i;
1307
1308 for (i = 0; i <= amd_iommu_last_bdf; ++i) {
1309 if (amd_iommu_pd_table[i] != domain)
1310 continue;
1311 set_dte_entry(i, domain);
1312 }
1313}
1314
1315static void update_domain(struct protection_domain *domain)
1316{
1317 if (!domain->updated)
1318 return;
1319
1320 update_device_table(domain);
1321 flush_devices_by_domain(domain);
1322 iommu_flush_domain(domain->id);
1323
1324 domain->updated = false;
1325}
1326
1297/* 1327/*
1298 * If the pte_page is not yet allocated this function is called 1328 * If the pte_page is not yet allocated this function is called
1299 */ 1329 */
@@ -1351,6 +1381,8 @@ static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
1351 } else 1381 } else
1352 pte += IOMMU_PTE_L0_INDEX(address); 1382 pte += IOMMU_PTE_L0_INDEX(address);
1353 1383
1384 update_domain(&dom->domain);
1385
1354 return pte; 1386 return pte;
1355} 1387}
1356 1388