Diffstat (limited to 'drivers/pci/intel-iommu.c')
-rw-r--r--	drivers/pci/intel-iommu.c	239
1 file changed, 210 insertions, 29 deletions
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 23e56a564e05..dcda5212f3bb 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -36,6 +36,7 @@
 #include <linux/iova.h>
 #include <linux/iommu.h>
 #include <linux/intel-iommu.h>
+#include <linux/sysdev.h>
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
 #include "pci.h"
@@ -247,7 +248,8 @@ struct dmar_domain {
 struct device_domain_info {
 	struct list_head link;	/* link to domain siblings */
 	struct list_head global; /* link to global list */
-	u8 bus;			/* PCI bus numer */
+	int segment;		/* PCI domain */
+	u8 bus;			/* PCI bus number */
 	u8 devfn;		/* PCI devfn number */
 	struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
 	struct dmar_domain *domain; /* pointer to domain */
@@ -467,7 +469,7 @@ static void domain_update_iommu_cap(struct dmar_domain *domain)
 	domain_update_iommu_snooping(domain);
 }
 
-static struct intel_iommu *device_to_iommu(u8 bus, u8 devfn)
+static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
 {
 	struct dmar_drhd_unit *drhd = NULL;
 	int i;
@@ -475,12 +477,20 @@ static struct intel_iommu *device_to_iommu(u8 bus, u8 devfn)
 	for_each_drhd_unit(drhd) {
 		if (drhd->ignored)
 			continue;
+		if (segment != drhd->segment)
+			continue;
 
-		for (i = 0; i < drhd->devices_cnt; i++)
+		for (i = 0; i < drhd->devices_cnt; i++) {
 			if (drhd->devices[i] &&
 			    drhd->devices[i]->bus->number == bus &&
 			    drhd->devices[i]->devfn == devfn)
 				return drhd->iommu;
+			if (drhd->devices[i] &&
+			    drhd->devices[i]->subordinate &&
+			    drhd->devices[i]->subordinate->number <= bus &&
+			    drhd->devices[i]->subordinate->subordinate >= bus)
+				return drhd->iommu;
+		}
 
 		if (drhd->include_all)
 			return drhd->iommu;
@@ -1312,7 +1322,7 @@ static void domain_exit(struct dmar_domain *domain)
 }
 
 static int domain_context_mapping_one(struct dmar_domain *domain,
-		u8 bus, u8 devfn)
+		int segment, u8 bus, u8 devfn)
 {
 	struct context_entry *context;
 	unsigned long flags;
@@ -1327,7 +1337,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
 	BUG_ON(!domain->pgd);
 
-	iommu = device_to_iommu(bus, devfn);
+	iommu = device_to_iommu(segment, bus, devfn);
 	if (!iommu)
 		return -ENODEV;
 
@@ -1417,8 +1427,8 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
 	int ret;
 	struct pci_dev *tmp, *parent;
 
-	ret = domain_context_mapping_one(domain, pdev->bus->number,
-		pdev->devfn);
+	ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
+					 pdev->bus->number, pdev->devfn);
 	if (ret)
 		return ret;
 
@@ -1429,18 +1439,23 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
 	/* Secondary interface's bus number and devfn 0 */
 	parent = pdev->bus->self;
 	while (parent != tmp) {
-		ret = domain_context_mapping_one(domain, parent->bus->number,
-			parent->devfn);
+		ret = domain_context_mapping_one(domain,
+						 pci_domain_nr(parent->bus),
+						 parent->bus->number,
+						 parent->devfn);
 		if (ret)
 			return ret;
 		parent = parent->bus->self;
 	}
 	if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
 		return domain_context_mapping_one(domain,
-			tmp->subordinate->number, 0);
+					pci_domain_nr(tmp->subordinate),
+					tmp->subordinate->number, 0);
 	else /* this is a legacy PCI bridge */
 		return domain_context_mapping_one(domain,
-			tmp->bus->number, tmp->devfn);
+					pci_domain_nr(tmp->bus),
+					tmp->bus->number,
+					tmp->devfn);
 }
 
 static int domain_context_mapped(struct pci_dev *pdev)
@@ -1449,12 +1464,12 @@ static int domain_context_mapped(struct pci_dev *pdev)
 	struct pci_dev *tmp, *parent;
 	struct intel_iommu *iommu;
 
-	iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
+	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
+				pdev->devfn);
 	if (!iommu)
 		return -ENODEV;
 
-	ret = device_context_mapped(iommu,
-		pdev->bus->number, pdev->devfn);
+	ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
 	if (!ret)
 		return ret;
 	/* dependent device mapping */
@@ -1465,17 +1480,17 @@ static int domain_context_mapped(struct pci_dev *pdev)
 	parent = pdev->bus->self;
 	while (parent != tmp) {
 		ret = device_context_mapped(iommu, parent->bus->number,
-			parent->devfn);
+					    parent->devfn);
 		if (!ret)
 			return ret;
 		parent = parent->bus->self;
 	}
 	if (tmp->is_pcie)
-		return device_context_mapped(iommu,
-			tmp->subordinate->number, 0);
+		return device_context_mapped(iommu, tmp->subordinate->number,
+					     0);
 	else
-		return device_context_mapped(iommu,
-			tmp->bus->number, tmp->devfn);
+		return device_context_mapped(iommu, tmp->bus->number,
+					     tmp->devfn);
 }
 
 static int
@@ -1542,7 +1557,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
 			info->dev->dev.archdata.iommu = NULL;
 		spin_unlock_irqrestore(&device_domain_lock, flags);
 
-		iommu = device_to_iommu(info->bus, info->devfn);
+		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
 		iommu_detach_dev(iommu, info->bus, info->devfn);
 		free_devinfo_mem(info);
 
@@ -1577,11 +1592,14 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
 	struct pci_dev *dev_tmp;
 	unsigned long flags;
 	int bus = 0, devfn = 0;
+	int segment;
 
 	domain = find_domain(pdev);
 	if (domain)
 		return domain;
 
+	segment = pci_domain_nr(pdev->bus);
+
 	dev_tmp = pci_find_upstream_pcie_bridge(pdev);
 	if (dev_tmp) {
 		if (dev_tmp->is_pcie) {
@@ -1593,7 +1611,8 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
 		}
 		spin_lock_irqsave(&device_domain_lock, flags);
 		list_for_each_entry(info, &device_domain_list, global) {
-			if (info->bus == bus && info->devfn == devfn) {
+			if (info->segment == segment &&
+			    info->bus == bus && info->devfn == devfn) {
 				found = info->domain;
 				break;
 			}
@@ -1631,6 +1650,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
 			domain_exit(domain);
 			goto error;
 		}
+		info->segment = segment;
 		info->bus = bus;
 		info->devfn = devfn;
 		info->dev = NULL;
@@ -1642,7 +1662,8 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
 		found = NULL;
 		spin_lock_irqsave(&device_domain_lock, flags);
 		list_for_each_entry(tmp, &device_domain_list, global) {
-			if (tmp->bus == bus && tmp->devfn == devfn) {
+			if (tmp->segment == segment &&
+			    tmp->bus == bus && tmp->devfn == devfn) {
 				found = tmp->domain;
 				break;
 			}
@@ -1662,6 +1683,7 @@ found_domain:
 	info = alloc_devinfo_mem();
 	if (!info)
 		goto error;
+	info->segment = segment;
 	info->bus = pdev->bus->number;
 	info->devfn = pdev->devfn;
 	info->dev = pdev;
@@ -1946,6 +1968,15 @@ static int __init init_dmars(void)
 		}
 	}
 
+#ifdef CONFIG_INTR_REMAP
+	if (!intr_remapping_enabled) {
+		ret = enable_intr_remapping(0);
+		if (ret)
+			printk(KERN_ERR
+			       "IOMMU: enable interrupt remapping failed\n");
+	}
+#endif
+
 	/*
 	 * For each rmrr
 	 *   for each dev attached to rmrr
@@ -2597,6 +2628,150 @@ static void __init init_no_remapping_devices(void)
 	}
 }
 
+#ifdef CONFIG_SUSPEND
+static int init_iommu_hw(void)
+{
+	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu = NULL;
+
+	for_each_active_iommu(iommu, drhd)
+		if (iommu->qi)
+			dmar_reenable_qi(iommu);
+
+	for_each_active_iommu(iommu, drhd) {
+		iommu_flush_write_buffer(iommu);
+
+		iommu_set_root_entry(iommu);
+
+		iommu->flush.flush_context(iommu, 0, 0, 0,
+					   DMA_CCMD_GLOBAL_INVL, 0);
+		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
+					 DMA_TLB_GLOBAL_FLUSH, 0);
+		iommu_disable_protect_mem_regions(iommu);
+		iommu_enable_translation(iommu);
+	}
+
+	return 0;
+}
+
+static void iommu_flush_all(void)
+{
+	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu;
+
+	for_each_active_iommu(iommu, drhd) {
+		iommu->flush.flush_context(iommu, 0, 0, 0,
+					   DMA_CCMD_GLOBAL_INVL, 0);
+		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
+					 DMA_TLB_GLOBAL_FLUSH, 0);
+	}
+}
+
+static int iommu_suspend(struct sys_device *dev, pm_message_t state)
+{
+	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu = NULL;
+	unsigned long flag;
+
+	for_each_active_iommu(iommu, drhd) {
+		iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
+						 GFP_ATOMIC);
+		if (!iommu->iommu_state)
+			goto nomem;
+	}
+
+	iommu_flush_all();
+
+	for_each_active_iommu(iommu, drhd) {
+		iommu_disable_translation(iommu);
+
+		spin_lock_irqsave(&iommu->register_lock, flag);
+
+		iommu->iommu_state[SR_DMAR_FECTL_REG] =
+			readl(iommu->reg + DMAR_FECTL_REG);
+		iommu->iommu_state[SR_DMAR_FEDATA_REG] =
+			readl(iommu->reg + DMAR_FEDATA_REG);
+		iommu->iommu_state[SR_DMAR_FEADDR_REG] =
+			readl(iommu->reg + DMAR_FEADDR_REG);
+		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
+			readl(iommu->reg + DMAR_FEUADDR_REG);
+
+		spin_unlock_irqrestore(&iommu->register_lock, flag);
+	}
+	return 0;
+
+nomem:
+	for_each_active_iommu(iommu, drhd)
+		kfree(iommu->iommu_state);
+
+	return -ENOMEM;
+}
+
+static int iommu_resume(struct sys_device *dev)
+{
+	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu = NULL;
+	unsigned long flag;
+
+	if (init_iommu_hw()) {
+		WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
+		return -EIO;
+	}
+
+	for_each_active_iommu(iommu, drhd) {
+
+		spin_lock_irqsave(&iommu->register_lock, flag);
+
+		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
+			iommu->reg + DMAR_FECTL_REG);
+		writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
+			iommu->reg + DMAR_FEDATA_REG);
+		writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
+			iommu->reg + DMAR_FEADDR_REG);
+		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
+			iommu->reg + DMAR_FEUADDR_REG);
+
+		spin_unlock_irqrestore(&iommu->register_lock, flag);
+	}
+
+	for_each_active_iommu(iommu, drhd)
+		kfree(iommu->iommu_state);
+
+	return 0;
+}
+
+static struct sysdev_class iommu_sysclass = {
+	.name		= "iommu",
+	.resume		= iommu_resume,
+	.suspend	= iommu_suspend,
+};
+
+static struct sys_device device_iommu = {
+	.cls	= &iommu_sysclass,
+};
+
+static int __init init_iommu_sysfs(void)
+{
+	int error;
+
+	error = sysdev_class_register(&iommu_sysclass);
+	if (error)
+		return error;
+
+	error = sysdev_register(&device_iommu);
+	if (error)
+		sysdev_class_unregister(&iommu_sysclass);
+
+	return error;
+}
+
+#else
+static int __init init_iommu_sysfs(void)
+{
+	return 0;
+}
+#endif	/* CONFIG_PM */
+
 int __init intel_iommu_init(void)
 {
 	int ret = 0;
@@ -2632,6 +2807,7 @@ int __init intel_iommu_init(void)
 	init_timer(&unmap_timer);
 	force_iommu = 1;
 	dma_ops = &intel_dma_ops;
+	init_iommu_sysfs();
 
 	register_iommu(&intel_iommu_ops);
 
@@ -2648,6 +2824,7 @@ static int vm_domain_add_dev_info(struct dmar_domain *domain,
 	if (!info)
 		return -ENOMEM;
 
+	info->segment = pci_domain_nr(pdev->bus);
 	info->bus = pdev->bus->number;
 	info->devfn = pdev->devfn;
 	info->dev = pdev;
@@ -2677,15 +2854,15 @@ static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
 		parent = pdev->bus->self;
 		while (parent != tmp) {
 			iommu_detach_dev(iommu, parent->bus->number,
-				parent->devfn);
+					 parent->devfn);
 			parent = parent->bus->self;
 		}
 		if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
 			iommu_detach_dev(iommu,
 				tmp->subordinate->number, 0);
 		else /* this is a legacy PCI bridge */
-			iommu_detach_dev(iommu,
-				tmp->bus->number, tmp->devfn);
+			iommu_detach_dev(iommu, tmp->bus->number,
+					 tmp->devfn);
 	}
 }
 
@@ -2698,13 +2875,15 @@ static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
 	int found = 0;
 	struct list_head *entry, *tmp;
 
-	iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
+	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
+				pdev->devfn);
 	if (!iommu)
 		return;
 
 	spin_lock_irqsave(&device_domain_lock, flags);
 	list_for_each_safe(entry, tmp, &domain->devices) {
 		info = list_entry(entry, struct device_domain_info, link);
+		/* No need to compare PCI domain; it has to be the same */
 		if (info->bus == pdev->bus->number &&
 		    info->devfn == pdev->devfn) {
 			list_del(&info->link);
@@ -2729,7 +2908,8 @@ static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
 		 * owned by this domain, clear this iommu in iommu_bmp
 		 * update iommu count and coherency
 		 */
-		if (device_to_iommu(info->bus, info->devfn) == iommu)
+		if (iommu == device_to_iommu(info->segment, info->bus,
+					    info->devfn))
 			found = 1;
 	}
 
@@ -2762,7 +2942,7 @@ static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
 
 		spin_unlock_irqrestore(&device_domain_lock, flags1);
 
-		iommu = device_to_iommu(info->bus, info->devfn);
+		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
 		iommu_detach_dev(iommu, info->bus, info->devfn);
 		iommu_detach_dependent_devices(iommu, info->dev);
 
@@ -2950,7 +3130,8 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
 		}
 	}
 
-	iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
+	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
+				pdev->devfn);
 	if (!iommu)
 		return -ENODEV;
 