author     Joerg Roedel <joerg.roedel@amd.com>    2009-11-26 08:49:59 -0500
committer  Joerg Roedel <joerg.roedel@amd.com>    2009-11-27 08:20:35 -0500
commit     7f760ddd702d162d693bc79f62c3bdd7fe55bd9d (patch)
tree       82ee348a6777d500d9744595864169fcc6279c79
parent     7c392cbe984d904f7c89a6a75b2ac245254e8da5 (diff)
x86/amd-iommu: Cleanup attach/detach_device code
This patch cleans up the attach_device and detach_device
paths and fixes reference counting while at it.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
-rw-r--r--  arch/x86/kernel/amd_iommu.c  102
1 file changed, 58 insertions(+), 44 deletions(-)
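To make the shape of the cleanup easier to follow, here is a rough, userspace-style sketch of the pattern this patch introduces: attach and detach each funnel through a single helper (do_attach()/do_detach()) that updates the per-device data, adjusts the reference counts, and flushes the device table entry. The struct layouts, the flush_dte() stub, and the main() driver below are simplified stand-ins for illustration only, not the kernel's real definitions; the diff that follows is the authoritative code.

/* Illustrative model only -- types and helpers are simplified stand-ins. */
#include <assert.h>
#include <stdio.h>

#define MAX_IOMMUS 4

struct protection_domain {
	unsigned int dev_iommu[MAX_IOMMUS];	/* per-IOMMU reference count */
	unsigned int dev_cnt;			/* total attached devices */
};

struct dev_data {
	struct protection_domain *domain;	/* domain the device is attached to */
	unsigned int iommu_index;		/* which IOMMU handles this device */
	unsigned int devid;
};

static void flush_dte(unsigned int devid)
{
	/* stands in for iommu_queue_inv_dev_entry() */
	printf("flush DTE for devid %u\n", devid);
}

static void do_attach(struct dev_data *dd, struct protection_domain *dom)
{
	/* Update data structures */
	dd->domain = dom;

	/* Do reference counting */
	dom->dev_iommu[dd->iommu_index] += 1;
	dom->dev_cnt += 1;

	/* Flush the DTE entry */
	flush_dte(dd->devid);
}

static void do_detach(struct dev_data *dd)
{
	/* Decrease reference counters */
	dd->domain->dev_iommu[dd->iommu_index] -= 1;
	dd->domain->dev_cnt -= 1;

	/* Update data structures */
	dd->domain = NULL;

	/* Flush the DTE entry */
	flush_dte(dd->devid);
}

int main(void)
{
	struct protection_domain dom = { {0}, 0 };
	struct dev_data dev = { NULL, 0, 42 };

	do_attach(&dev, &dom);
	assert(dom.dev_cnt == 1 && dom.dev_iommu[0] == 1);

	do_detach(&dev);
	assert(dom.dev_cnt == 0 && dom.dev_iommu[0] == 0);

	return 0;
}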
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 530d6080940f..e3363fd5eef5 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -1329,7 +1329,6 @@ static bool dma_ops_domain(struct protection_domain *domain)
 
 static void set_dte_entry(u16 devid, struct protection_domain *domain)
 {
-	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
 	u64 pte_root = virt_to_phys(domain->pt_root);
 
 	BUG_ON(amd_iommu_pd_table[devid] != NULL);
@@ -1344,18 +1343,11 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain)
 
 	amd_iommu_pd_table[devid] = domain;
 
-	/* Do reference counting */
-	domain->dev_iommu[iommu->index] += 1;
-	domain->dev_cnt += 1;
-
-	/* Flush the changes DTE entry */
-	iommu_queue_inv_dev_entry(iommu, devid);
 }
 
 static void clear_dte_entry(u16 devid)
 {
 	struct protection_domain *domain = amd_iommu_pd_table[devid];
-	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
 
 	BUG_ON(domain == NULL);
 
@@ -1368,11 +1360,51 @@ static void clear_dte_entry(u16 devid)
 	amd_iommu_dev_table[devid].data[2] = 0;
 
 	amd_iommu_apply_erratum_63(devid);
+}
+
+static void do_attach(struct device *dev, struct protection_domain *domain)
+{
+	struct iommu_dev_data *dev_data;
+	struct amd_iommu *iommu;
+	u16 devid;
+
+	devid = get_device_id(dev);
+	iommu = amd_iommu_rlookup_table[devid];
+	dev_data = get_dev_data(dev);
+
+	/* Update data structures */
+	dev_data->domain = domain;
+	list_add(&dev_data->list, &domain->dev_list);
+	set_dte_entry(devid, domain);
+
+	/* Do reference counting */
+	domain->dev_iommu[iommu->index] += 1;
+	domain->dev_cnt += 1;
+
+	/* Flush the DTE entry */
+	iommu_queue_inv_dev_entry(iommu, devid);
+}
+
+static void do_detach(struct device *dev)
+{
+	struct iommu_dev_data *dev_data;
+	struct amd_iommu *iommu;
+	u16 devid;
+
+	devid = get_device_id(dev);
+	iommu = amd_iommu_rlookup_table[devid];
+	dev_data = get_dev_data(dev);
 
 	/* decrease reference counters */
-	domain->dev_iommu[iommu->index] -= 1;
-	domain->dev_cnt -= 1;
+	dev_data->domain->dev_iommu[iommu->index] -= 1;
+	dev_data->domain->dev_cnt -= 1;
+
+	/* Update data structures */
+	dev_data->domain = NULL;
+	list_del(&dev_data->list);
+	clear_dte_entry(devid);
 
+	/* Flush the DTE entry */
 	iommu_queue_inv_dev_entry(iommu, devid);
 }
 
@@ -1384,12 +1416,10 @@ static int __attach_device(struct device *dev,
			   struct protection_domain *domain)
 {
 	struct iommu_dev_data *dev_data, *alias_data;
-	u16 devid, alias;
 
-	devid = get_device_id(dev);
-	alias = amd_iommu_alias_table[devid];
 	dev_data = get_dev_data(dev);
 	alias_data = get_dev_data(dev_data->alias);
+
 	if (!alias_data)
 		return -EINVAL;
 
@@ -1406,21 +1436,16 @@ static int __attach_device(struct device *dev,
 		return -EBUSY;
 
 	/* Do real assignment */
-	if (alias != devid) {
-		if (alias_data->domain == NULL) {
-			alias_data->domain = domain;
-			list_add(&alias_data->list, &domain->dev_list);
-			set_dte_entry(alias, domain);
-		}
+	if (dev_data->alias != dev) {
+		alias_data = get_dev_data(dev_data->alias);
+		if (alias_data->domain == NULL)
+			do_attach(dev_data->alias, domain);
 
 		atomic_inc(&alias_data->bind);
 	}
 
-	if (dev_data->domain == NULL) {
-		dev_data->domain = domain;
-		list_add(&dev_data->list, &domain->dev_list);
-		set_dte_entry(devid, domain);
-	}
+	if (dev_data->domain == NULL)
+		do_attach(dev, domain);
 
 	atomic_inc(&dev_data->bind);
 
@@ -1459,35 +1484,24 @@ static int attach_device(struct device *dev,
  */
 static void __detach_device(struct device *dev)
 {
-	u16 devid = get_device_id(dev), alias;
-	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
 	struct iommu_dev_data *dev_data = get_dev_data(dev);
 	struct iommu_dev_data *alias_data;
 	unsigned long flags;
 
-	BUG_ON(!iommu);
+	BUG_ON(!dev_data->domain);
 
-	devid = get_device_id(dev);
-	alias = get_device_id(dev_data->alias);
+	spin_lock_irqsave(&dev_data->domain->lock, flags);
 
-	if (devid != alias) {
+	if (dev_data->alias != dev) {
 		alias_data = get_dev_data(dev_data->alias);
-		if (atomic_dec_and_test(&alias_data->bind)) {
-			spin_lock_irqsave(&alias_data->domain->lock, flags);
-			clear_dte_entry(alias);
-			list_del(&alias_data->list);
-			spin_unlock_irqrestore(&alias_data->domain->lock, flags);
-			alias_data->domain = NULL;
-		}
+		if (atomic_dec_and_test(&alias_data->bind))
+			do_detach(dev_data->alias);
 	}
 
-	if (atomic_dec_and_test(&dev_data->bind)) {
-		spin_lock_irqsave(&dev_data->domain->lock, flags);
-		clear_dte_entry(devid);
-		list_del(&dev_data->list);
-		spin_unlock_irqrestore(&dev_data->domain->lock, flags);
-		dev_data->domain = NULL;
-	}
+	if (atomic_dec_and_test(&dev_data->bind))
+		do_detach(dev);
+
+	spin_unlock_irqrestore(&dev_data->domain->lock, flags);
 
 	/*
 	 * If we run in passthrough mode the device must be assigned to the