author    Joerg Roedel <joerg.roedel@amd.com>    2009-11-24 09:39:42 -0500
committer Joerg Roedel <joerg.roedel@amd.com>    2009-11-27 08:17:09 -0500
commit    15898bbcb48fc86c2baff156163df0941ecb6a15 (patch)
tree      0c3992a2bcedab86647f3829bbbc97e9eec9dfdb /arch
parent    f3be07da531ceef1b51295e5becc9bc07670b671 (diff)
x86/amd-iommu: Let domain_for_device handle aliases

If there is no domain associated with a device yet and the device has
an alias device which already has a domain, the original device needs
to get the same domain as the alias device. This patch changes
domain_for_device() to handle that situation and directly assigns the
alias device's domain to the device in this case.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
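The alias handling is easiest to see in isolation. What follows is a
minimal user-space C sketch of the lookup logic this patch introduces;
it is an illustration, not kernel code. The helper name domain_for_devid,
the table sizes, and the sample bus/devfn values are invented for the
example; calc_devid mirrors the kernel helper of the same name, and the
two arrays stand in for amd_iommu_alias_table and amd_iommu_pd_table.
The kernel performs the inheritance via __attach_device() under the
device-table lock; the sketch collapses that into a direct assignment.

#include <stdio.h>
#include <stdint.h>

#define DEV_TABLE_SIZE 0x10000	/* one entry per 16-bit PCI devid */

struct protection_domain { int id; };

/* Simplified stand-ins for the kernel's global tables */
static uint16_t alias_table[DEV_TABLE_SIZE];	/* devid -> alias devid */
static struct protection_domain *pd_table[DEV_TABLE_SIZE]; /* devid -> domain */

/* devid is bus:devfn packed into 16 bits, as the kernel's calc_devid() does */
static uint16_t calc_devid(uint8_t bus, uint8_t devfn)
{
	return ((uint16_t)bus << 8) | devfn;
}

/*
 * Model of the new domain_for_device() behaviour: if the device has no
 * domain but its alias already has one, inherit the alias's domain.
 * (The kernel does this via __attach_device(); here it is a plain store.)
 */
static struct protection_domain *domain_for_devid(uint16_t devid)
{
	uint16_t alias = alias_table[devid];

	if (pd_table[devid] == NULL && pd_table[alias] != NULL)
		pd_table[devid] = pd_table[alias];

	return pd_table[devid];
}

int main(void)
{
	struct protection_domain dom = { .id = 42 };
	uint16_t dev   = calc_devid(0x01, 0x08);	/* device behind a bridge   */
	uint16_t alias = calc_devid(0x01, 0x00);	/* its requests carry this ID */

	for (int i = 0; i < DEV_TABLE_SIZE; i++)
		alias_table[i] = (uint16_t)i;	/* default: device is its own alias */

	alias_table[dev] = alias;
	pd_table[alias]  = &dom;	/* the alias already has a domain */

	printf("device %04x inherits domain %d\n", dev, domain_for_devid(dev)->id);
	return 0;
}

Compiled with any C99 compiler, this prints that device 0108 inherits
domain 42 - the exact situation the patched domain_for_device() now
resolves under the devtable read lock.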
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/amd_iommu.c | 227
1 file changed, 135 insertions(+), 92 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 2cd5800e688..75470ffee35 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -71,6 +71,19 @@ static u64 *fetch_pte(struct protection_domain *domain,
 		      unsigned long address, int map_size);
 static void update_domain(struct protection_domain *domain);
 
+/****************************************************************************
+ *
+ * Helper functions
+ *
+ ****************************************************************************/
+
+static inline u16 get_device_id(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+
+	return calc_devid(pdev->bus->number, pdev->devfn);
+}
+
 #ifdef CONFIG_AMD_IOMMU_STATS
 
 /*
@@ -1174,26 +1187,13 @@ static bool dma_ops_domain(struct protection_domain *domain)
 	return domain->flags & PD_DMA_OPS_MASK;
 }
 
-/*
- * Find out the protection domain structure for a given PCI device. This
- * will give us the pointer to the page table root for example.
- */
-static struct protection_domain *domain_for_device(u16 devid)
-{
-	struct protection_domain *dom;
-	unsigned long flags;
-
-	read_lock_irqsave(&amd_iommu_devtable_lock, flags);
-	dom = amd_iommu_pd_table[devid];
-	read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
-
-	return dom;
-}
-
 static void set_dte_entry(u16 devid, struct protection_domain *domain)
 {
+	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
 	u64 pte_root = virt_to_phys(domain->pt_root);
 
+	BUG_ON(amd_iommu_pd_table[devid] != NULL);
+
 	pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
 		    << DEV_ENTRY_MODE_SHIFT;
 	pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
@@ -1203,42 +1203,87 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain)
 	amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
 
 	amd_iommu_pd_table[devid] = domain;
+
+	/* Do reference counting */
+	domain->dev_iommu[iommu->index] += 1;
+	domain->dev_cnt += 1;
+
+	/* Flush the changes DTE entry */
+	iommu_queue_inv_dev_entry(iommu, devid);
+}
+
+static void clear_dte_entry(u16 devid)
+{
+	struct protection_domain *domain = amd_iommu_pd_table[devid];
+	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+
+	BUG_ON(domain == NULL);
+
+	/* remove domain from the lookup table */
+	amd_iommu_pd_table[devid] = NULL;
+
+	/* remove entry from the device table seen by the hardware */
+	amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
+	amd_iommu_dev_table[devid].data[1] = 0;
+	amd_iommu_dev_table[devid].data[2] = 0;
+
+	amd_iommu_apply_erratum_63(devid);
+
+	/* decrease reference counters */
+	domain->dev_iommu[iommu->index] -= 1;
+	domain->dev_cnt -= 1;
+
+	iommu_queue_inv_dev_entry(iommu, devid);
 }
 
 /*
  * If a device is not yet associated with a domain, this function does
  * assigns it visible for the hardware
  */
-static void __attach_device(struct amd_iommu *iommu,
-			    struct protection_domain *domain,
-			    u16 devid)
+static int __attach_device(struct device *dev,
+			   struct protection_domain *domain)
 {
+	u16 devid = get_device_id(dev);
+	u16 alias = amd_iommu_alias_table[devid];
+
 	/* lock domain */
 	spin_lock(&domain->lock);
 
-	/* update DTE entry */
-	set_dte_entry(devid, domain);
+	/* Some sanity checks */
+	if (amd_iommu_pd_table[alias] != NULL &&
+	    amd_iommu_pd_table[alias] != domain)
+		return -EBUSY;
 
-	/* Do reference counting */
-	domain->dev_iommu[iommu->index] += 1;
-	domain->dev_cnt += 1;
+	if (amd_iommu_pd_table[devid] != NULL &&
+	    amd_iommu_pd_table[devid] != domain)
+		return -EBUSY;
+
+	/* Do real assignment */
+	if (alias != devid &&
+	    amd_iommu_pd_table[alias] == NULL)
+		set_dte_entry(alias, domain);
+
+	if (amd_iommu_pd_table[devid] == NULL)
+		set_dte_entry(devid, domain);
 
 	/* ready */
 	spin_unlock(&domain->lock);
+
+	return 0;
 }
 
 /*
  * If a device is not yet associated with a domain, this function does
  * assigns it visible for the hardware
  */
-static void attach_device(struct amd_iommu *iommu,
-			  struct protection_domain *domain,
-			  u16 devid)
+static int attach_device(struct device *dev,
+			 struct protection_domain *domain)
 {
 	unsigned long flags;
+	int ret;
 
 	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
-	__attach_device(iommu, domain, devid);
+	ret = __attach_device(dev, domain);
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 
 	/*
@@ -1246,62 +1291,70 @@ static void attach_device(struct amd_iommu *iommu,
 	 * left the caches in the IOMMU dirty. So we have to flush
 	 * here to evict all dirty stuff.
 	 */
-	iommu_queue_inv_dev_entry(iommu, devid);
 	iommu_flush_tlb_pde(domain);
+
+	return ret;
 }
 
 /*
  * Removes a device from a protection domain (unlocked)
  */
-static void __detach_device(struct protection_domain *domain, u16 devid)
+static void __detach_device(struct device *dev)
 {
+	u16 devid = get_device_id(dev);
 	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
 
 	BUG_ON(!iommu);
 
-	/* lock domain */
-	spin_lock(&domain->lock);
-
-	/* remove domain from the lookup table */
-	amd_iommu_pd_table[devid] = NULL;
-
-	/* remove entry from the device table seen by the hardware */
-	amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
-	amd_iommu_dev_table[devid].data[1] = 0;
-	amd_iommu_dev_table[devid].data[2] = 0;
-
-	amd_iommu_apply_erratum_63(devid);
-
-	/* decrease reference counters */
-	domain->dev_iommu[iommu->index] -= 1;
-	domain->dev_cnt -= 1;
-
-	/* ready */
-	spin_unlock(&domain->lock);
+	clear_dte_entry(devid);
 
 	/*
 	 * If we run in passthrough mode the device must be assigned to the
 	 * passthrough domain if it is detached from any other domain
 	 */
-	if (iommu_pass_through) {
-		struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
-		__attach_device(iommu, pt_domain, devid);
-	}
+	if (iommu_pass_through)
+		__attach_device(dev, pt_domain);
 }
 
 /*
  * Removes a device from a protection domain (with devtable_lock held)
  */
-static void detach_device(struct protection_domain *domain, u16 devid)
+static void detach_device(struct device *dev)
 {
 	unsigned long flags;
 
 	/* lock device table */
 	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
-	__detach_device(domain, devid);
+	__detach_device(dev);
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 }
 
+/*
+ * Find out the protection domain structure for a given PCI device. This
+ * will give us the pointer to the page table root for example.
+ */
+static struct protection_domain *domain_for_device(struct device *dev)
+{
+	struct protection_domain *dom;
+	unsigned long flags;
+	u16 devid, alias;
+
+	devid = get_device_id(dev);
+	alias = amd_iommu_alias_table[devid];
+
+	read_lock_irqsave(&amd_iommu_devtable_lock, flags);
+	dom = amd_iommu_pd_table[devid];
+	if (dom == NULL &&
+	    amd_iommu_pd_table[alias] != NULL) {
+		__attach_device(dev, amd_iommu_pd_table[alias]);
+		dom = amd_iommu_pd_table[devid];
+	}
+
+	read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+
+	return dom;
+}
+
 static int device_change_notifier(struct notifier_block *nb,
 				  unsigned long action, void *data)
 {
@@ -1322,7 +1375,7 @@ static int device_change_notifier(struct notifier_block *nb,
 	if (iommu == NULL)
 		goto out;
 
-	domain = domain_for_device(devid);
+	domain = domain_for_device(dev);
 
 	if (domain && !dma_ops_domain(domain))
 		WARN_ONCE(1, "AMD IOMMU WARNING: device %s already bound "
@@ -1334,7 +1387,7 @@ static int device_change_notifier(struct notifier_block *nb,
 			goto out;
 		if (iommu_pass_through)
 			break;
-		detach_device(domain, devid);
+		detach_device(dev);
 		break;
 	case BUS_NOTIFY_ADD_DEVICE:
 		/* allocate a protection domain if a device is added */
@@ -1441,30 +1494,25 @@ static bool get_device_resources(struct device *dev,
 {
 	struct dma_ops_domain *dma_dom;
 	struct amd_iommu *iommu;
-	struct pci_dev *pcidev;
-	u16 _bdf;
 
 	if (!check_device(dev))
 		return false;
 
-	pcidev = to_pci_dev(dev);
-	_bdf = calc_devid(pcidev->bus->number, pcidev->devfn);
-	*bdf = amd_iommu_alias_table[_bdf];
+	*bdf = get_device_id(dev);
+	*domain = domain_for_device(dev);
 	iommu = amd_iommu_rlookup_table[*bdf];
-	*domain = domain_for_device(*bdf);
 
-	if (*domain == NULL) {
-		dma_dom = find_protection_domain(*bdf);
-		if (!dma_dom)
-			dma_dom = iommu->default_dom;
-		*domain = &dma_dom->domain;
-		attach_device(iommu, *domain, *bdf);
-		DUMP_printk("Using protection domain %d for device %s\n",
-			    (*domain)->id, dev_name(dev));
-	}
+	if (*domain != NULL)
+		return true;
 
-	if (domain_for_device(_bdf) == NULL)
-		attach_device(iommu, *domain, _bdf);
+	/* Device not bount yet - bind it */
+	dma_dom = find_protection_domain(*bdf);
+	if (!dma_dom)
+		dma_dom = iommu->default_dom;
+	*domain = &dma_dom->domain;
+	attach_device(dev, *domain);
+	DUMP_printk("Using protection domain %d for device %s\n",
+		    (*domain)->id, dev_name(dev));
 
 	return true;
 }
@@ -2068,7 +2116,7 @@ static void prealloc_protection_domains(void)
 		if (devid > amd_iommu_last_bdf)
 			continue;
 		devid = amd_iommu_alias_table[devid];
-		if (domain_for_device(devid))
+		if (domain_for_device(&dev->dev))
 			continue;
 		iommu = amd_iommu_rlookup_table[devid];
 		if (!iommu)
@@ -2079,9 +2127,7 @@ static void prealloc_protection_domains(void)
 		init_unity_mappings_for_device(dma_dom, devid);
 		dma_dom->target_dev = devid;
 
-		attach_device(iommu, &dma_dom->domain, devid);
-		if (__devid != devid)
-			attach_device(iommu, &dma_dom->domain, __devid);
+		attach_device(&dev->dev, &dma_dom->domain);
 
 		list_add_tail(&dma_dom->list, &iommu_pd_list);
 	}
@@ -2174,7 +2220,7 @@ static void cleanup_domain(struct protection_domain *domain)
 
 	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
 		if (amd_iommu_pd_table[devid] == domain)
-			__detach_device(domain, devid);
+			clear_dte_entry(devid);
 
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 }
@@ -2262,7 +2308,6 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom)
 static void amd_iommu_detach_device(struct iommu_domain *dom,
 				    struct device *dev)
 {
-	struct protection_domain *domain = dom->priv;
 	struct amd_iommu *iommu;
 	struct pci_dev *pdev;
 	u16 devid;
@@ -2275,7 +2320,7 @@ static void amd_iommu_detach_device(struct iommu_domain *dom,
 	devid = calc_devid(pdev->bus->number, pdev->devfn);
 
 	if (devid > 0)
-		detach_device(domain, devid);
+		detach_device(dev);
 
 	iommu = amd_iommu_rlookup_table[devid];
 	if (!iommu)
@@ -2292,6 +2337,7 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
 	struct protection_domain *old_domain;
 	struct amd_iommu *iommu;
 	struct pci_dev *pdev;
+	int ret;
 	u16 devid;
 
 	if (dev->bus != &pci_bus_type)
@@ -2309,15 +2355,15 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
 	if (!iommu)
 		return -EINVAL;
 
-	old_domain = domain_for_device(devid);
+	old_domain = amd_iommu_pd_table[devid];
 	if (old_domain)
-		detach_device(old_domain, devid);
+		detach_device(dev);
 
-	attach_device(iommu, domain, devid);
+	ret = attach_device(dev, domain);
 
 	iommu_completion_wait(iommu);
 
-	return 0;
+	return ret;
 }
 
 static int amd_iommu_map_range(struct iommu_domain *dom,
@@ -2414,8 +2460,9 @@ static struct iommu_ops amd_iommu_ops = {
 
 int __init amd_iommu_init_passthrough(void)
 {
+	struct amd_iommu *iommu;
 	struct pci_dev *dev = NULL;
-	u16 devid, devid2;
+	u16 devid;
 
 	/* allocate passthroug domain */
 	pt_domain = protection_domain_alloc();
@@ -2425,20 +2472,16 @@ int __init amd_iommu_init_passthrough(void)
 	pt_domain->mode |= PAGE_MODE_NONE;
 
 	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
-		struct amd_iommu *iommu;
 
 		devid = calc_devid(dev->bus->number, dev->devfn);
 		if (devid > amd_iommu_last_bdf)
 			continue;
 
-		devid2 = amd_iommu_alias_table[devid];
-
-		iommu = amd_iommu_rlookup_table[devid2];
+		iommu = amd_iommu_rlookup_table[devid];
 		if (!iommu)
 			continue;
 
-		__attach_device(iommu, pt_domain, devid);
-		__attach_device(iommu, pt_domain, devid2);
+		attach_device(&dev->dev, pt_domain);
 	}
 
 	pr_info("AMD-Vi: Initialized for Passthrough Mode\n");