aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/pci/intel-iommu.c
diff options
context:
space:
mode:
authorFenghua Yu <fenghua.yu@intel.com>2009-06-19 16:47:29 -0400
committerDavid Woodhouse <David.Woodhouse@intel.com>2009-06-23 17:07:54 -0400
commit2c2e2c389d03bb16b8cdf9db3ac615385fac100f (patch)
tree19af024e80adbbd1536aae0fa0c23ed4249834fe /drivers/pci/intel-iommu.c
parent687d680985b1438360a9ba470ece8b57cd205c3b (diff)
IOMMU Identity Mapping Support (drivers/pci/intel_iommu.c)
Identity mapping for IOMMU defines a single domain to 1:1 map all PCI devices to all usable memory. This reduces map/unmap overhead in the DMA APIs and improves IOMMU performance. On 10Gb network cards, Netperf shows no performance degradation compared to non-IOMMU performance. This method may lose some of DMA remapping's benefits, like isolation. The patch sets up identity mapping for all PCI devices to all usable memory. In the DMA API, there is no overhead to maintain page tables, invalidate the iotlb, flush caches, etc. 32-bit DMA devices don't use the identity mapping domain, in order to access memory beyond 4 GiB. When the kernel option iommu=pt is set, pass through is tried first. If pass through succeeds, the IOMMU uses pass through. If pass through is not supported in hardware or fails for whatever reason, the IOMMU falls back to identity mapping. Signed-off-by: Fenghua Yu <fenghua.yu@intel.com> Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Diffstat (limited to 'drivers/pci/intel-iommu.c')
-rw-r--r--drivers/pci/intel-iommu.c314
1 files changed, 254 insertions, 60 deletions
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 178853a07440..e53eacd75c8d 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -39,6 +39,7 @@
39#include <linux/sysdev.h> 39#include <linux/sysdev.h>
40#include <asm/cacheflush.h> 40#include <asm/cacheflush.h>
41#include <asm/iommu.h> 41#include <asm/iommu.h>
42#include <asm/e820.h>
42#include "pci.h" 43#include "pci.h"
43 44
44#define ROOT_SIZE VTD_PAGE_SIZE 45#define ROOT_SIZE VTD_PAGE_SIZE
@@ -217,6 +218,14 @@ static inline bool dma_pte_present(struct dma_pte *pte)
217 return (pte->val & 3) != 0; 218 return (pte->val & 3) != 0;
218} 219}
219 220
221/*
222 * This domain is a static identity mapping domain.
223 * 1. This domain creates a static 1:1 mapping to all usable memory.
224 * 2. It maps to each iommu if successful.
225 * 3. Each iommu maps to this domain if successful.
226 */
227struct dmar_domain *si_domain;
228
220/* devices under the same p2p bridge are owned in one domain */ 229/* devices under the same p2p bridge are owned in one domain */
221#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0) 230#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
222 231
@@ -225,6 +234,9 @@ static inline bool dma_pte_present(struct dma_pte *pte)
225 */ 234 */
226#define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1) 235#define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1)
227 236
237/* si_domain contains multiple devices */
238#define DOMAIN_FLAG_STATIC_IDENTITY (1 << 2)
239
228struct dmar_domain { 240struct dmar_domain {
229 int id; /* domain id */ 241 int id; /* domain id */
230 unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/ 242 unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/
@@ -435,12 +447,14 @@ int iommu_calculate_agaw(struct intel_iommu *iommu)
435 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH); 447 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
436} 448}
437 449
438/* in native case, each domain is related to only one iommu */ 450/* This function only returns a single iommu in a domain */
439static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain) 451static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
440{ 452{
441 int iommu_id; 453 int iommu_id;
442 454
455 /* si_domain and vm domain should not get here. */
443 BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE); 456 BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
457 BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);
444 458
445 iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus); 459 iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
446 if (iommu_id < 0 || iommu_id >= g_num_of_iommus) 460 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
@@ -1189,48 +1203,71 @@ void free_dmar_iommu(struct intel_iommu *iommu)
1189 free_context_table(iommu); 1203 free_context_table(iommu);
1190} 1204}
1191 1205
1192static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu) 1206static struct dmar_domain *alloc_domain(void)
1193{ 1207{
1194 unsigned long num;
1195 unsigned long ndomains;
1196 struct dmar_domain *domain; 1208 struct dmar_domain *domain;
1197 unsigned long flags;
1198 1209
1199 domain = alloc_domain_mem(); 1210 domain = alloc_domain_mem();
1200 if (!domain) 1211 if (!domain)
1201 return NULL; 1212 return NULL;
1202 1213
1214 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
1215 domain->flags = 0;
1216
1217 return domain;
1218}
1219
1220static int iommu_attach_domain(struct dmar_domain *domain,
1221 struct intel_iommu *iommu)
1222{
1223 int num;
1224 unsigned long ndomains;
1225 unsigned long flags;
1226
1203 ndomains = cap_ndoms(iommu->cap); 1227 ndomains = cap_ndoms(iommu->cap);
1204 1228
1205 spin_lock_irqsave(&iommu->lock, flags); 1229 spin_lock_irqsave(&iommu->lock, flags);
1230
1206 num = find_first_zero_bit(iommu->domain_ids, ndomains); 1231 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1207 if (num >= ndomains) { 1232 if (num >= ndomains) {
1208 spin_unlock_irqrestore(&iommu->lock, flags); 1233 spin_unlock_irqrestore(&iommu->lock, flags);
1209 free_domain_mem(domain);
1210 printk(KERN_ERR "IOMMU: no free domain ids\n"); 1234 printk(KERN_ERR "IOMMU: no free domain ids\n");
1211 return NULL; 1235 return -ENOMEM;
1212 } 1236 }
1213 1237
1214 set_bit(num, iommu->domain_ids);
1215 domain->id = num; 1238 domain->id = num;
1216 memset(&domain->iommu_bmp, 0, sizeof(unsigned long)); 1239 set_bit(num, iommu->domain_ids);
1217 set_bit(iommu->seq_id, &domain->iommu_bmp); 1240 set_bit(iommu->seq_id, &domain->iommu_bmp);
1218 domain->flags = 0;
1219 iommu->domains[num] = domain; 1241 iommu->domains[num] = domain;
1220 spin_unlock_irqrestore(&iommu->lock, flags); 1242 spin_unlock_irqrestore(&iommu->lock, flags);
1221 1243
1222 return domain; 1244 return 0;
1223} 1245}
1224 1246
1225static void iommu_free_domain(struct dmar_domain *domain) 1247static void iommu_detach_domain(struct dmar_domain *domain,
1248 struct intel_iommu *iommu)
1226{ 1249{
1227 unsigned long flags; 1250 unsigned long flags;
1228 struct intel_iommu *iommu; 1251 int num, ndomains;
1229 1252 int found = 0;
1230 iommu = domain_get_iommu(domain);
1231 1253
1232 spin_lock_irqsave(&iommu->lock, flags); 1254 spin_lock_irqsave(&iommu->lock, flags);
1233 clear_bit(domain->id, iommu->domain_ids); 1255 ndomains = cap_ndoms(iommu->cap);
1256 num = find_first_bit(iommu->domain_ids, ndomains);
1257 for (; num < ndomains; ) {
1258 if (iommu->domains[num] == domain) {
1259 found = 1;
1260 break;
1261 }
1262 num = find_next_bit(iommu->domain_ids,
1263 cap_ndoms(iommu->cap), num+1);
1264 }
1265
1266 if (found) {
1267 clear_bit(num, iommu->domain_ids);
1268 clear_bit(iommu->seq_id, &domain->iommu_bmp);
1269 iommu->domains[num] = NULL;
1270 }
1234 spin_unlock_irqrestore(&iommu->lock, flags); 1271 spin_unlock_irqrestore(&iommu->lock, flags);
1235} 1272}
1236 1273
@@ -1350,6 +1387,8 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
1350 1387
1351static void domain_exit(struct dmar_domain *domain) 1388static void domain_exit(struct dmar_domain *domain)
1352{ 1389{
1390 struct dmar_drhd_unit *drhd;
1391 struct intel_iommu *iommu;
1353 u64 end; 1392 u64 end;
1354 1393
1355 /* Domain 0 is reserved, so dont process it */ 1394 /* Domain 0 is reserved, so dont process it */
@@ -1368,7 +1407,10 @@ static void domain_exit(struct dmar_domain *domain)
1368 /* free page tables */ 1407 /* free page tables */
1369 dma_pte_free_pagetable(domain, 0, end); 1408 dma_pte_free_pagetable(domain, 0, end);
1370 1409
1371 iommu_free_domain(domain); 1410 for_each_active_iommu(iommu, drhd)
1411 if (test_bit(iommu->seq_id, &domain->iommu_bmp))
1412 iommu_detach_domain(domain, iommu);
1413
1372 free_domain_mem(domain); 1414 free_domain_mem(domain);
1373} 1415}
1374 1416
@@ -1408,7 +1450,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1408 id = domain->id; 1450 id = domain->id;
1409 pgd = domain->pgd; 1451 pgd = domain->pgd;
1410 1452
1411 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) { 1453 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1454 domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
1412 int found = 0; 1455 int found = 0;
1413 1456
1414 /* find an available domain id for this device in iommu */ 1457 /* find an available domain id for this device in iommu */
@@ -1433,6 +1476,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1433 } 1476 }
1434 1477
1435 set_bit(num, iommu->domain_ids); 1478 set_bit(num, iommu->domain_ids);
1479 set_bit(iommu->seq_id, &domain->iommu_bmp);
1436 iommu->domains[num] = domain; 1480 iommu->domains[num] = domain;
1437 id = num; 1481 id = num;
1438 } 1482 }
@@ -1675,6 +1719,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1675 unsigned long flags; 1719 unsigned long flags;
1676 int bus = 0, devfn = 0; 1720 int bus = 0, devfn = 0;
1677 int segment; 1721 int segment;
1722 int ret;
1678 1723
1679 domain = find_domain(pdev); 1724 domain = find_domain(pdev);
1680 if (domain) 1725 if (domain)
@@ -1707,6 +1752,10 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1707 } 1752 }
1708 } 1753 }
1709 1754
1755 domain = alloc_domain();
1756 if (!domain)
1757 goto error;
1758
1710 /* Allocate new domain for the device */ 1759 /* Allocate new domain for the device */
1711 drhd = dmar_find_matched_drhd_unit(pdev); 1760 drhd = dmar_find_matched_drhd_unit(pdev);
1712 if (!drhd) { 1761 if (!drhd) {
@@ -1716,9 +1765,11 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1716 } 1765 }
1717 iommu = drhd->iommu; 1766 iommu = drhd->iommu;
1718 1767
1719 domain = iommu_alloc_domain(iommu); 1768 ret = iommu_attach_domain(domain, iommu);
1720 if (!domain) 1769 if (ret) {
1770 domain_exit(domain);
1721 goto error; 1771 goto error;
1772 }
1722 1773
1723 if (domain_init(domain, gaw)) { 1774 if (domain_init(domain, gaw)) {
1724 domain_exit(domain); 1775 domain_exit(domain);
@@ -1792,6 +1843,8 @@ error:
1792 return find_domain(pdev); 1843 return find_domain(pdev);
1793} 1844}
1794 1845
1846static int iommu_identity_mapping;
1847
1795static int iommu_prepare_identity_map(struct pci_dev *pdev, 1848static int iommu_prepare_identity_map(struct pci_dev *pdev,
1796 unsigned long long start, 1849 unsigned long long start,
1797 unsigned long long end) 1850 unsigned long long end)
@@ -1804,8 +1857,11 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev,
1804 printk(KERN_INFO 1857 printk(KERN_INFO
1805 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n", 1858 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
1806 pci_name(pdev), start, end); 1859 pci_name(pdev), start, end);
1807 /* page table init */ 1860 if (iommu_identity_mapping)
1808 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH); 1861 domain = si_domain;
1862 else
1863 /* page table init */
1864 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
1809 if (!domain) 1865 if (!domain)
1810 return -ENOMEM; 1866 return -ENOMEM;
1811 1867
@@ -1952,7 +2008,110 @@ static int __init init_context_pass_through(void)
1952 return 0; 2008 return 0;
1953} 2009}
1954 2010
1955static int __init init_dmars(void) 2011static int md_domain_init(struct dmar_domain *domain, int guest_width);
2012static int si_domain_init(void)
2013{
2014 struct dmar_drhd_unit *drhd;
2015 struct intel_iommu *iommu;
2016 int ret = 0;
2017
2018 si_domain = alloc_domain();
2019 if (!si_domain)
2020 return -EFAULT;
2021
2022
2023 for_each_active_iommu(iommu, drhd) {
2024 ret = iommu_attach_domain(si_domain, iommu);
2025 if (ret) {
2026 domain_exit(si_domain);
2027 return -EFAULT;
2028 }
2029 }
2030
2031 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2032 domain_exit(si_domain);
2033 return -EFAULT;
2034 }
2035
2036 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2037
2038 return 0;
2039}
2040
2041static void domain_remove_one_dev_info(struct dmar_domain *domain,
2042 struct pci_dev *pdev);
2043static int identity_mapping(struct pci_dev *pdev)
2044{
2045 struct device_domain_info *info;
2046
2047 if (likely(!iommu_identity_mapping))
2048 return 0;
2049
2050
2051 list_for_each_entry(info, &si_domain->devices, link)
2052 if (info->dev == pdev)
2053 return 1;
2054 return 0;
2055}
2056
2057static int domain_add_dev_info(struct dmar_domain *domain,
2058 struct pci_dev *pdev)
2059{
2060 struct device_domain_info *info;
2061 unsigned long flags;
2062
2063 info = alloc_devinfo_mem();
2064 if (!info)
2065 return -ENOMEM;
2066
2067 info->segment = pci_domain_nr(pdev->bus);
2068 info->bus = pdev->bus->number;
2069 info->devfn = pdev->devfn;
2070 info->dev = pdev;
2071 info->domain = domain;
2072
2073 spin_lock_irqsave(&device_domain_lock, flags);
2074 list_add(&info->link, &domain->devices);
2075 list_add(&info->global, &device_domain_list);
2076 pdev->dev.archdata.iommu = info;
2077 spin_unlock_irqrestore(&device_domain_lock, flags);
2078
2079 return 0;
2080}
2081
2082static int iommu_prepare_static_identity_mapping(void)
2083{
2084 int i;
2085 struct pci_dev *pdev = NULL;
2086 int ret;
2087
2088 ret = si_domain_init();
2089 if (ret)
2090 return -EFAULT;
2091
2092 printk(KERN_INFO "IOMMU: Setting identity map:\n");
2093 for_each_pci_dev(pdev) {
2094 for (i = 0; i < e820.nr_map; i++) {
2095 struct e820entry *ei = &e820.map[i];
2096
2097 if (ei->type == E820_RAM) {
2098 ret = iommu_prepare_identity_map(pdev,
2099 ei->addr, ei->addr + ei->size);
2100 if (ret) {
2101 printk(KERN_INFO "1:1 mapping to one domain failed.\n");
2102 return -EFAULT;
2103 }
2104 }
2105 }
2106 ret = domain_add_dev_info(si_domain, pdev);
2107 if (ret)
2108 return ret;
2109 }
2110
2111 return 0;
2112}
2113
2114int __init init_dmars(void)
1956{ 2115{
1957 struct dmar_drhd_unit *drhd; 2116 struct dmar_drhd_unit *drhd;
1958 struct dmar_rmrr_unit *rmrr; 2117 struct dmar_rmrr_unit *rmrr;
@@ -1962,6 +2121,13 @@ static int __init init_dmars(void)
1962 int pass_through = 1; 2121 int pass_through = 1;
1963 2122
1964 /* 2123 /*
2124 * In case pass through can not be enabled, iommu tries to use identity
2125 * mapping.
2126 */
2127 if (iommu_pass_through)
2128 iommu_identity_mapping = 1;
2129
2130 /*
1965 * for each drhd 2131 * for each drhd
1966 * allocate root 2132 * allocate root
1967 * initialize and program root entry to not present 2133 * initialize and program root entry to not present
@@ -2090,9 +2256,12 @@ static int __init init_dmars(void)
2090 2256
2091 /* 2257 /*
2092 * If pass through is not set or not enabled, setup context entries for 2258 * If pass through is not set or not enabled, setup context entries for
2093 * identity mappings for rmrr, gfx, and isa. 2259 * identity mappings for rmrr, gfx, and isa and may fall back to static
2260 * identity mapping if iommu_identity_mapping is set.
2094 */ 2261 */
2095 if (!iommu_pass_through) { 2262 if (!iommu_pass_through) {
2263 if (iommu_identity_mapping)
2264 iommu_prepare_static_identity_mapping();
2096 /* 2265 /*
2097 * For each rmrr 2266 * For each rmrr
2098 * for each dev attached to rmrr 2267 * for each dev attached to rmrr
@@ -2107,6 +2276,7 @@ static int __init init_dmars(void)
2107 * endfor 2276 * endfor
2108 * endfor 2277 * endfor
2109 */ 2278 */
2279 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2110 for_each_rmrr_units(rmrr) { 2280 for_each_rmrr_units(rmrr) {
2111 for (i = 0; i < rmrr->devices_cnt; i++) { 2281 for (i = 0; i < rmrr->devices_cnt; i++) {
2112 pdev = rmrr->devices[i]; 2282 pdev = rmrr->devices[i];
@@ -2248,6 +2418,52 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
2248 return domain; 2418 return domain;
2249} 2419}
2250 2420
2421static int iommu_dummy(struct pci_dev *pdev)
2422{
2423 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2424}
2425
2426/* Check if the pdev needs to go through non-identity map and unmap process.*/
2427static int iommu_no_mapping(struct pci_dev *pdev)
2428{
2429 int found;
2430
2431 if (!iommu_identity_mapping)
2432 return iommu_dummy(pdev);
2433
2434 found = identity_mapping(pdev);
2435 if (found) {
2436 if (pdev->dma_mask > DMA_BIT_MASK(32))
2437 return 1;
2438 else {
2439 /*
2440 * 32 bit DMA is removed from si_domain and fall back
2441 * to non-identity mapping.
2442 */
2443 domain_remove_one_dev_info(si_domain, pdev);
2444 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2445 pci_name(pdev));
2446 return 0;
2447 }
2448 } else {
2449 /*
2450 * In case of a detached 64 bit DMA device from vm, the device
2451 * is put into si_domain for identity mapping.
2452 */
2453 if (pdev->dma_mask > DMA_BIT_MASK(32)) {
2454 int ret;
2455 ret = domain_add_dev_info(si_domain, pdev);
2456 if (!ret) {
2457 printk(KERN_INFO "64bit %s uses identity mapping\n",
2458 pci_name(pdev));
2459 return 1;
2460 }
2461 }
2462 }
2463
2464 return iommu_dummy(pdev);
2465}
2466
2251static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, 2467static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2252 size_t size, int dir, u64 dma_mask) 2468 size_t size, int dir, u64 dma_mask)
2253{ 2469{
@@ -2260,7 +2476,8 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2260 struct intel_iommu *iommu; 2476 struct intel_iommu *iommu;
2261 2477
2262 BUG_ON(dir == DMA_NONE); 2478 BUG_ON(dir == DMA_NONE);
2263 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) 2479
2480 if (iommu_no_mapping(pdev))
2264 return paddr; 2481 return paddr;
2265 2482
2266 domain = get_valid_domain_for_dev(pdev); 2483 domain = get_valid_domain_for_dev(pdev);
@@ -2401,8 +2618,9 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2401 struct iova *iova; 2618 struct iova *iova;
2402 struct intel_iommu *iommu; 2619 struct intel_iommu *iommu;
2403 2620
2404 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) 2621 if (iommu_no_mapping(pdev))
2405 return; 2622 return;
2623
2406 domain = find_domain(pdev); 2624 domain = find_domain(pdev);
2407 BUG_ON(!domain); 2625 BUG_ON(!domain);
2408 2626
@@ -2492,7 +2710,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2492 struct scatterlist *sg; 2710 struct scatterlist *sg;
2493 struct intel_iommu *iommu; 2711 struct intel_iommu *iommu;
2494 2712
2495 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) 2713 if (iommu_no_mapping(pdev))
2496 return; 2714 return;
2497 2715
2498 domain = find_domain(pdev); 2716 domain = find_domain(pdev);
@@ -2553,7 +2771,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
2553 struct intel_iommu *iommu; 2771 struct intel_iommu *iommu;
2554 2772
2555 BUG_ON(dir == DMA_NONE); 2773 BUG_ON(dir == DMA_NONE);
2556 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) 2774 if (iommu_no_mapping(pdev))
2557 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir); 2775 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
2558 2776
2559 domain = get_valid_domain_for_dev(pdev); 2777 domain = get_valid_domain_for_dev(pdev);
@@ -2951,31 +3169,6 @@ int __init intel_iommu_init(void)
2951 return 0; 3169 return 0;
2952} 3170}
2953 3171
2954static int vm_domain_add_dev_info(struct dmar_domain *domain,
2955 struct pci_dev *pdev)
2956{
2957 struct device_domain_info *info;
2958 unsigned long flags;
2959
2960 info = alloc_devinfo_mem();
2961 if (!info)
2962 return -ENOMEM;
2963
2964 info->segment = pci_domain_nr(pdev->bus);
2965 info->bus = pdev->bus->number;
2966 info->devfn = pdev->devfn;
2967 info->dev = pdev;
2968 info->domain = domain;
2969
2970 spin_lock_irqsave(&device_domain_lock, flags);
2971 list_add(&info->link, &domain->devices);
2972 list_add(&info->global, &device_domain_list);
2973 pdev->dev.archdata.iommu = info;
2974 spin_unlock_irqrestore(&device_domain_lock, flags);
2975
2976 return 0;
2977}
2978
2979static void iommu_detach_dependent_devices(struct intel_iommu *iommu, 3172static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
2980 struct pci_dev *pdev) 3173 struct pci_dev *pdev)
2981{ 3174{
@@ -3003,7 +3196,7 @@ static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
3003 } 3196 }
3004} 3197}
3005 3198
3006static void vm_domain_remove_one_dev_info(struct dmar_domain *domain, 3199static void domain_remove_one_dev_info(struct dmar_domain *domain,
3007 struct pci_dev *pdev) 3200 struct pci_dev *pdev)
3008{ 3201{
3009 struct device_domain_info *info; 3202 struct device_domain_info *info;
@@ -3136,7 +3329,7 @@ static struct dmar_domain *iommu_alloc_vm_domain(void)
3136 return domain; 3329 return domain;
3137} 3330}
3138 3331
3139static int vm_domain_init(struct dmar_domain *domain, int guest_width) 3332static int md_domain_init(struct dmar_domain *domain, int guest_width)
3140{ 3333{
3141 int adjust_width; 3334 int adjust_width;
3142 3335
@@ -3227,7 +3420,7 @@ static int intel_iommu_domain_init(struct iommu_domain *domain)
3227 "intel_iommu_domain_init: dmar_domain == NULL\n"); 3420 "intel_iommu_domain_init: dmar_domain == NULL\n");
3228 return -ENOMEM; 3421 return -ENOMEM;
3229 } 3422 }
3230 if (vm_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { 3423 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
3231 printk(KERN_ERR 3424 printk(KERN_ERR
3232 "intel_iommu_domain_init() failed\n"); 3425 "intel_iommu_domain_init() failed\n");
3233 vm_domain_exit(dmar_domain); 3426 vm_domain_exit(dmar_domain);
@@ -3262,8 +3455,9 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
3262 3455
3263 old_domain = find_domain(pdev); 3456 old_domain = find_domain(pdev);
3264 if (old_domain) { 3457 if (old_domain) {
3265 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) 3458 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
3266 vm_domain_remove_one_dev_info(old_domain, pdev); 3459 dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
3460 domain_remove_one_dev_info(old_domain, pdev);
3267 else 3461 else
3268 domain_remove_dev_info(old_domain); 3462 domain_remove_dev_info(old_domain);
3269 } 3463 }
@@ -3285,7 +3479,7 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
3285 return -EFAULT; 3479 return -EFAULT;
3286 } 3480 }
3287 3481
3288 ret = vm_domain_add_dev_info(dmar_domain, pdev); 3482 ret = domain_add_dev_info(dmar_domain, pdev);
3289 if (ret) 3483 if (ret)
3290 return ret; 3484 return ret;
3291 3485
@@ -3299,7 +3493,7 @@ static void intel_iommu_detach_device(struct iommu_domain *domain,
3299 struct dmar_domain *dmar_domain = domain->priv; 3493 struct dmar_domain *dmar_domain = domain->priv;
3300 struct pci_dev *pdev = to_pci_dev(dev); 3494 struct pci_dev *pdev = to_pci_dev(dev);
3301 3495
3302 vm_domain_remove_one_dev_info(dmar_domain, pdev); 3496 domain_remove_one_dev_info(dmar_domain, pdev);
3303} 3497}
3304 3498
3305static int intel_iommu_map_range(struct iommu_domain *domain, 3499static int intel_iommu_map_range(struct iommu_domain *domain,