author		Linus Torvalds <torvalds@linux-foundation.org>	2009-04-06 17:26:05 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-04-06 17:26:05 -0400
commit		ffa009c366e33f3eae48bba2547051fe15795f64 (patch)
tree		78736a4ee7c16819830a32a313867e3a88ac6aff /drivers/pci
parent		8e320d02718d2872d52ef88a69a493e420494269 (diff)
parent		46f06b72378d3187f0d12f7a60d020676bfbf332 (diff)
Merge git://git.infradead.org/iommu-2.6
* git://git.infradead.org/iommu-2.6:
  drivers/pci/intr_remapping.c: include acpi.h
  intel-iommu: Fix oops in device_to_iommu() when devices not found.
  intel-iommu: Handle PCI domains appropriately.
  intel-iommu: Fix device-to-iommu mapping for PCI-PCI bridges.
  x2apic/intr-remap: decouple interrupt remapping from x2apic
  x86, dmar: check if it's initialized before disable queue invalidation
  intel-iommu: set compatibility format interrupt
  Intel IOMMU Suspend/Resume Support - Interrupt Remapping
  Intel IOMMU Suspend/Resume Support - Queued Invalidation
  Intel IOMMU Suspend/Resume Support - DMAR
  intel-iommu: Add for_each_iommu() and for_each_active_iommu() macros
Diffstat (limited to 'drivers/pci')
-rw-r--r--	drivers/pci/dmar.c		|  71
-rw-r--r--	drivers/pci/intel-iommu.c	| 239
-rw-r--r--	drivers/pci/intr_remapping.c	|  84
3 files changed, 348 insertions(+), 46 deletions(-)
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index d313039e2fdf..25a00ce4f24d 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -180,6 +180,7 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
 	dmaru->hdr = header;
 	drhd = (struct acpi_dmar_hardware_unit *)header;
 	dmaru->reg_base_addr = drhd->address;
+	dmaru->segment = drhd->segment;
 	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
 
 	ret = alloc_iommu(dmaru);
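The segment recorded here is lifted straight from the DRHD entry in the ACPI DMAR table. For orientation, a sketch of that entry's layout as I read the ACPI spec of this era (field names are the kernel's; verify against include/acpi/actbl1.h before relying on them):

/* Sketch only: DMA Remapping Hardware Unit Definition (DRHD) entry. */
struct acpi_dmar_hardware_unit {
	struct acpi_dmar_header header;	/* type + length */
	u8	flags;			/* BIT0: INCLUDE_ALL */
	u8	reserved;
	u16	segment;		/* PCI segment (domain) covered */
	u64	address;		/* register base address */
};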
@@ -790,14 +791,41 @@ end:
 }
 
 /*
+ * Enable queued invalidation.
+ */
+static void __dmar_enable_qi(struct intel_iommu *iommu)
+{
+	u32 cmd, sts;
+	unsigned long flags;
+	struct q_inval *qi = iommu->qi;
+
+	qi->free_head = qi->free_tail = 0;
+	qi->free_cnt = QI_LENGTH;
+
+	spin_lock_irqsave(&iommu->register_lock, flags);
+
+	/* write zero to the tail reg */
+	writel(0, iommu->reg + DMAR_IQT_REG);
+
+	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
+
+	cmd = iommu->gcmd | DMA_GCMD_QIE;
+	iommu->gcmd |= DMA_GCMD_QIE;
+	writel(cmd, iommu->reg + DMAR_GCMD_REG);
+
+	/* Make sure hardware complete it */
+	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
+
+	spin_unlock_irqrestore(&iommu->register_lock, flags);
+}
+
+/*
  * Enable Queued Invalidation interface. This is a must to support
  * interrupt-remapping. Also used by DMA-remapping, which replaces
  * register based IOTLB invalidation.
  */
 int dmar_enable_qi(struct intel_iommu *iommu)
 {
-	u32 cmd, sts;
-	unsigned long flags;
 	struct q_inval *qi;
 
 	if (!ecap_qis(iommu->ecap))
@@ -835,19 +863,7 @@ int dmar_enable_qi(struct intel_iommu *iommu)
 
 	spin_lock_init(&qi->q_lock);
 
-	spin_lock_irqsave(&iommu->register_lock, flags);
-	/* write zero to the tail reg */
-	writel(0, iommu->reg + DMAR_IQT_REG);
-
-	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
-
-	cmd = iommu->gcmd | DMA_GCMD_QIE;
-	iommu->gcmd |= DMA_GCMD_QIE;
-	writel(cmd, iommu->reg + DMAR_GCMD_REG);
-
-	/* Make sure hardware complete it */
-	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
-	spin_unlock_irqrestore(&iommu->register_lock, flags);
+	__dmar_enable_qi(iommu);
 
 	return 0;
 }
@@ -1102,3 +1118,28 @@ int __init enable_drhd_fault_handling(void)
 
 	return 0;
 }
+
+/*
+ * Re-enable Queued Invalidation interface.
+ */
+int dmar_reenable_qi(struct intel_iommu *iommu)
+{
+	if (!ecap_qis(iommu->ecap))
+		return -ENOENT;
+
+	if (!iommu->qi)
+		return -ENOENT;
+
+	/*
+	 * First disable queued invalidation.
+	 */
+	dmar_disable_qi(iommu);
+	/*
+	 * Then enable queued invalidation again. Since there are no pending
+	 * invalidation requests now, it's safe to re-enable queued
+	 * invalidation.
+	 */
+	__dmar_enable_qi(iommu);
+
+	return 0;
+}
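The shape of this refactor matters for the suspend/resume patches later in the merge: dmar_enable_qi() keeps the one-time queue allocation, __dmar_enable_qi() only re-arms the hardware, and dmar_reenable_qi() composes disable-plus-re-arm so a resumed IOMMU reuses the queue it already owns. A standalone mock of that split (plain C; the fake_iommu type, the QIE bit, and the instantly-acking "hardware" are all invented for illustration):

#include <stdio.h>
#include <stdlib.h>

#define QIE 0x1	/* pretend queued-invalidation enable bit */

struct fake_iommu {
	unsigned int gcmd;	/* software shadow of the command register */
	unsigned int gsts;	/* fake status register */
	int *queue;		/* invalidation queue, allocated once */
};

static void arm_qi(struct fake_iommu *iommu)		/* ~ __dmar_enable_qi() */
{
	iommu->gcmd |= QIE;
	iommu->gsts = iommu->gcmd;	/* fake hardware acks at once */
}

static int setup_qi(struct fake_iommu *iommu)		/* ~ dmar_enable_qi() */
{
	iommu->queue = calloc(256, sizeof(*iommu->queue));
	if (!iommu->queue)
		return -1;
	arm_qi(iommu);
	return 0;
}

static int reenable_qi(struct fake_iommu *iommu)	/* ~ dmar_reenable_qi() */
{
	if (!iommu->queue)
		return -1;		/* never set up: nothing to re-arm */
	iommu->gcmd &= ~QIE;		/* ~ dmar_disable_qi() */
	arm_qi(iommu);			/* queue memory is reused as-is */
	return 0;
}

int main(void)
{
	struct fake_iommu iommu = { 0 };

	if (setup_qi(&iommu))		/* boot path */
		return 1;
	reenable_qi(&iommu);		/* resume path: no reallocation */
	printf("QIE enabled: %d\n", !!(iommu.gsts & QIE));
	free(iommu.queue);
	return 0;
}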
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 23e56a564e05..dcda5212f3bb 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -36,6 +36,7 @@
 #include <linux/iova.h>
 #include <linux/iommu.h>
 #include <linux/intel-iommu.h>
+#include <linux/sysdev.h>
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
 #include "pci.h"
@@ -247,7 +248,8 @@ struct dmar_domain {
 struct device_domain_info {
 	struct list_head link;	/* link to domain siblings */
 	struct list_head global; /* link to global list */
-	u8 bus;			/* PCI bus numer */
+	int segment;		/* PCI domain */
+	u8 bus;			/* PCI bus number */
 	u8 devfn;		/* PCI devfn number */
 	struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
 	struct dmar_domain *domain; /* pointer to domain */
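A quick illustration of what the new segment field buys: with more than one host bridge, (bus, devfn) alone is ambiguous, which is why the extended PCI address notation leads with the domain. A minimal standalone sketch (values invented):

#include <stdio.h>

int main(void)
{
	int segment = 0x0001, bus = 0x02, devfn = 0x13;	/* device 2, function 3 */

	/* Prints 0001:02:02.3, the usual domain:bus:device.function form. */
	printf("%04x:%02x:%02x.%x\n", segment, bus, devfn >> 3, devfn & 7);
	return 0;
}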
@@ -467,7 +469,7 @@ static void domain_update_iommu_cap(struct dmar_domain *domain)
 	domain_update_iommu_snooping(domain);
 }
 
-static struct intel_iommu *device_to_iommu(u8 bus, u8 devfn)
+static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
 {
 	struct dmar_drhd_unit *drhd = NULL;
 	int i;
@@ -475,12 +477,20 @@ static struct intel_iommu *device_to_iommu(u8 bus, u8 devfn)
 	for_each_drhd_unit(drhd) {
 		if (drhd->ignored)
 			continue;
+		if (segment != drhd->segment)
+			continue;
 
-		for (i = 0; i < drhd->devices_cnt; i++)
+		for (i = 0; i < drhd->devices_cnt; i++) {
 			if (drhd->devices[i] &&
 			    drhd->devices[i]->bus->number == bus &&
 			    drhd->devices[i]->devfn == devfn)
 				return drhd->iommu;
+			if (drhd->devices[i] &&
+			    drhd->devices[i]->subordinate &&
+			    drhd->devices[i]->subordinate->number <= bus &&
+			    drhd->devices[i]->subordinate->subordinate >= bus)
+				return drhd->iommu;
+		}
 
 		if (drhd->include_all)
 			return drhd->iommu;
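The second test added above is the PCI-PCI bridge fix from the merge: a device may not appear in any DRHD scope itself, but if it sits on a bus within some scoped bridge's [secondary, subordinate] range, that bridge's IOMMU covers it. In the kernel, devices[i]->subordinate->number is the bridge's secondary bus and ->subordinate->subordinate its highest downstream bus. A standalone sketch of just the interval check (invented types):

#include <stdio.h>

struct fake_bridge {
	unsigned char secondary;	/* first bus behind the bridge */
	unsigned char subordinate;	/* last bus behind the bridge */
};

static int behind_bridge(const struct fake_bridge *br, unsigned char bus)
{
	return br->secondary <= bus && bus <= br->subordinate;
}

int main(void)
{
	struct fake_bridge br = { .secondary = 4, .subordinate = 6 };

	printf("bus 5: %d\n", behind_bridge(&br, 5));	/* 1: claimed */
	printf("bus 7: %d\n", behind_bridge(&br, 7));	/* 0: not behind it */
	return 0;
}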
@@ -1312,7 +1322,7 @@ static void domain_exit(struct dmar_domain *domain)
 }
 
 static int domain_context_mapping_one(struct dmar_domain *domain,
-				      u8 bus, u8 devfn)
+				      int segment, u8 bus, u8 devfn)
 {
 	struct context_entry *context;
 	unsigned long flags;
@@ -1327,7 +1337,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
 	BUG_ON(!domain->pgd);
 
-	iommu = device_to_iommu(bus, devfn);
+	iommu = device_to_iommu(segment, bus, devfn);
 	if (!iommu)
 		return -ENODEV;
 
@@ -1417,8 +1427,8 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
 	int ret;
 	struct pci_dev *tmp, *parent;
 
-	ret = domain_context_mapping_one(domain, pdev->bus->number,
-					 pdev->devfn);
+	ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
+					 pdev->bus->number, pdev->devfn);
 	if (ret)
 		return ret;
 
@@ -1429,18 +1439,23 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
 	/* Secondary interface's bus number and devfn 0 */
 	parent = pdev->bus->self;
 	while (parent != tmp) {
-		ret = domain_context_mapping_one(domain, parent->bus->number,
-						 parent->devfn);
+		ret = domain_context_mapping_one(domain,
+						 pci_domain_nr(parent->bus),
+						 parent->bus->number,
+						 parent->devfn);
 		if (ret)
 			return ret;
 		parent = parent->bus->self;
 	}
 	if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
 		return domain_context_mapping_one(domain,
-					tmp->subordinate->number, 0);
+					pci_domain_nr(tmp->subordinate),
+					tmp->subordinate->number, 0);
 	else /* this is a legacy PCI bridge */
 		return domain_context_mapping_one(domain,
-					tmp->bus->number, tmp->devfn);
+					pci_domain_nr(tmp->bus),
+					tmp->bus->number,
+					tmp->devfn);
 }
 
 static int domain_context_mapped(struct pci_dev *pdev)
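Context entries are programmed per (segment, bus, devfn), so the same domain mapping has to be installed for the device and for every bridge between it and the upstream PCIe bridge; that is what the loop above walks. A toy standalone version of the walk (invented types and output, not kernel code):

#include <stdio.h>

struct fake_dev {
	int segment, bus, devfn;
	struct fake_dev *parent;	/* upstream bridge, NULL at the top */
};

static void map_one(int segment, int bus, int devfn)
{
	printf("map context %04x:%02x, devfn %#x\n", segment, bus, devfn);
}

int main(void)
{
	struct fake_dev root   = { 0, 0, 0, NULL };
	struct fake_dev bridge = { 0, 1, 0, &root };
	struct fake_dev dev    = { 0, 2, 3, &bridge };
	struct fake_dev *p;

	map_one(dev.segment, dev.bus, dev.devfn);	/* the device itself */
	for (p = dev.parent; p; p = p->parent)		/* then each bridge up */
		map_one(p->segment, p->bus, p->devfn);
	return 0;
}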
@@ -1449,12 +1464,12 @@ static int domain_context_mapped(struct pci_dev *pdev)
 	struct pci_dev *tmp, *parent;
 	struct intel_iommu *iommu;
 
-	iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
+	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
+				pdev->devfn);
 	if (!iommu)
 		return -ENODEV;
 
-	ret = device_context_mapped(iommu,
-				    pdev->bus->number, pdev->devfn);
+	ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
 	if (!ret)
 		return ret;
 	/* dependent device mapping */
@@ -1465,17 +1480,17 @@ static int domain_context_mapped(struct pci_dev *pdev)
 	parent = pdev->bus->self;
 	while (parent != tmp) {
 		ret = device_context_mapped(iommu, parent->bus->number,
 					    parent->devfn);
 		if (!ret)
 			return ret;
 		parent = parent->bus->self;
 	}
 	if (tmp->is_pcie)
-		return device_context_mapped(iommu,
-					tmp->subordinate->number, 0);
+		return device_context_mapped(iommu, tmp->subordinate->number,
+					     0);
 	else
-		return device_context_mapped(iommu,
-					tmp->bus->number, tmp->devfn);
+		return device_context_mapped(iommu, tmp->bus->number,
+					     tmp->devfn);
 }
 
 static int
@@ -1542,7 +1557,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
 		info->dev->dev.archdata.iommu = NULL;
 		spin_unlock_irqrestore(&device_domain_lock, flags);
 
-		iommu = device_to_iommu(info->bus, info->devfn);
+		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
 		iommu_detach_dev(iommu, info->bus, info->devfn);
 		free_devinfo_mem(info);
 
@@ -1577,11 +1592,14 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
 	struct pci_dev *dev_tmp;
 	unsigned long flags;
 	int bus = 0, devfn = 0;
+	int segment;
 
 	domain = find_domain(pdev);
 	if (domain)
 		return domain;
 
+	segment = pci_domain_nr(pdev->bus);
+
 	dev_tmp = pci_find_upstream_pcie_bridge(pdev);
 	if (dev_tmp) {
 		if (dev_tmp->is_pcie) {
@@ -1593,7 +1611,8 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
 		}
 		spin_lock_irqsave(&device_domain_lock, flags);
 		list_for_each_entry(info, &device_domain_list, global) {
-			if (info->bus == bus && info->devfn == devfn) {
+			if (info->segment == segment &&
+			    info->bus == bus && info->devfn == devfn) {
 				found = info->domain;
 				break;
 			}
@@ -1631,6 +1650,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
 		domain_exit(domain);
 		goto error;
 	}
+	info->segment = segment;
 	info->bus = bus;
 	info->devfn = devfn;
 	info->dev = NULL;
@@ -1642,7 +1662,8 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
 		found = NULL;
 		spin_lock_irqsave(&device_domain_lock, flags);
 		list_for_each_entry(tmp, &device_domain_list, global) {
-			if (tmp->bus == bus && tmp->devfn == devfn) {
+			if (tmp->segment == segment &&
+			    tmp->bus == bus && tmp->devfn == devfn) {
 				found = tmp->domain;
 				break;
 			}
@@ -1662,6 +1683,7 @@ found_domain:
 	info = alloc_devinfo_mem();
 	if (!info)
 		goto error;
+	info->segment = segment;
 	info->bus = pdev->bus->number;
 	info->devfn = pdev->devfn;
 	info->dev = pdev;
@@ -1946,6 +1968,15 @@ static int __init init_dmars(void)
 		}
 	}
 
+#ifdef CONFIG_INTR_REMAP
+	if (!intr_remapping_enabled) {
+		ret = enable_intr_remapping(0);
+		if (ret)
+			printk(KERN_ERR
+			       "IOMMU: enable interrupt remapping failed\n");
+	}
+#endif
+
 	/*
 	 * For each rmrr
 	 *   for each dev attached to rmrr
@@ -2597,6 +2628,150 @@ static void __init init_no_remapping_devices(void)
 	}
 }
 
+#ifdef CONFIG_SUSPEND
+static int init_iommu_hw(void)
+{
+	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu = NULL;
+
+	for_each_active_iommu(iommu, drhd)
+		if (iommu->qi)
+			dmar_reenable_qi(iommu);
+
+	for_each_active_iommu(iommu, drhd) {
+		iommu_flush_write_buffer(iommu);
+
+		iommu_set_root_entry(iommu);
+
+		iommu->flush.flush_context(iommu, 0, 0, 0,
+					   DMA_CCMD_GLOBAL_INVL, 0);
+		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
+					 DMA_TLB_GLOBAL_FLUSH, 0);
+		iommu_disable_protect_mem_regions(iommu);
+		iommu_enable_translation(iommu);
+	}
+
+	return 0;
+}
+
+static void iommu_flush_all(void)
+{
+	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu;
+
+	for_each_active_iommu(iommu, drhd) {
+		iommu->flush.flush_context(iommu, 0, 0, 0,
+					   DMA_CCMD_GLOBAL_INVL, 0);
+		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
+					 DMA_TLB_GLOBAL_FLUSH, 0);
+	}
+}
+
+static int iommu_suspend(struct sys_device *dev, pm_message_t state)
+{
+	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu = NULL;
+	unsigned long flag;
+
+	for_each_active_iommu(iommu, drhd) {
+		iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
+					     GFP_ATOMIC);
+		if (!iommu->iommu_state)
+			goto nomem;
+	}
+
+	iommu_flush_all();
+
+	for_each_active_iommu(iommu, drhd) {
+		iommu_disable_translation(iommu);
+
+		spin_lock_irqsave(&iommu->register_lock, flag);
+
+		iommu->iommu_state[SR_DMAR_FECTL_REG] =
+			readl(iommu->reg + DMAR_FECTL_REG);
+		iommu->iommu_state[SR_DMAR_FEDATA_REG] =
+			readl(iommu->reg + DMAR_FEDATA_REG);
+		iommu->iommu_state[SR_DMAR_FEADDR_REG] =
+			readl(iommu->reg + DMAR_FEADDR_REG);
+		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
+			readl(iommu->reg + DMAR_FEUADDR_REG);
+
+		spin_unlock_irqrestore(&iommu->register_lock, flag);
+	}
+	return 0;
+
+nomem:
+	for_each_active_iommu(iommu, drhd)
+		kfree(iommu->iommu_state);
+
+	return -ENOMEM;
+}
+
+static int iommu_resume(struct sys_device *dev)
+{
+	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu = NULL;
+	unsigned long flag;
+
+	if (init_iommu_hw()) {
+		WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
+		return -EIO;
+	}
+
+	for_each_active_iommu(iommu, drhd) {
+
+		spin_lock_irqsave(&iommu->register_lock, flag);
+
+		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
+			iommu->reg + DMAR_FECTL_REG);
+		writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
+			iommu->reg + DMAR_FEDATA_REG);
+		writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
+			iommu->reg + DMAR_FEADDR_REG);
+		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
+			iommu->reg + DMAR_FEUADDR_REG);
+
+		spin_unlock_irqrestore(&iommu->register_lock, flag);
+	}
+
+	for_each_active_iommu(iommu, drhd)
+		kfree(iommu->iommu_state);
+
+	return 0;
+}
+
+static struct sysdev_class iommu_sysclass = {
+	.name		= "iommu",
+	.resume		= iommu_resume,
+	.suspend	= iommu_suspend,
+};
+
+static struct sys_device device_iommu = {
+	.cls	= &iommu_sysclass,
+};
+
+static int __init init_iommu_sysfs(void)
+{
+	int error;
+
+	error = sysdev_class_register(&iommu_sysclass);
+	if (error)
+		return error;
+
+	error = sysdev_register(&device_iommu);
+	if (error)
+		sysdev_class_unregister(&iommu_sysclass);
+
+	return error;
+}
+
+#else
+static int __init init_iommu_sysfs(void)
+{
+	return 0;
+}
+#endif	/* CONFIG_PM */
+
 int __init intel_iommu_init(void)
 {
 	int ret = 0;
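Stripped of the locking and MMIO, the suspend/resume pair is a simple snapshot pattern: allocate a per-unit buffer, save the four fault-event registers, and write them back on resume before translation is re-enabled. A standalone mock of just that save/restore step (fake register file; the names mirror the SR_DMAR_* indices but are invented):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum { FECTL, FEDATA, FEADDR, FEUADDR, NR_SR_REGS };

struct fake_iommu {
	unsigned int reg[NR_SR_REGS];	/* stands in for the MMIO registers */
	unsigned int *state;		/* snapshot, allocated at suspend */
};

static int suspend_one(struct fake_iommu *iommu)
{
	iommu->state = calloc(NR_SR_REGS, sizeof(*iommu->state));
	if (!iommu->state)
		return -1;			/* ~ the nomem path above */
	memcpy(iommu->state, iommu->reg, sizeof(iommu->reg));
	return 0;
}

static void resume_one(struct fake_iommu *iommu)
{
	memcpy(iommu->reg, iommu->state, sizeof(iommu->reg));
	free(iommu->state);
	iommu->state = NULL;
}

int main(void)
{
	struct fake_iommu iommu = { .reg = { 0x11, 0x22, 0x33, 0x44 } };

	if (suspend_one(&iommu))
		return 1;
	memset(iommu.reg, 0, sizeof(iommu.reg));	/* "power loss" */
	resume_one(&iommu);
	printf("FEDATA restored: %#x\n", iommu.reg[FEDATA]);
	return 0;
}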
@@ -2632,6 +2807,7 @@ int __init intel_iommu_init(void)
 	init_timer(&unmap_timer);
 	force_iommu = 1;
 	dma_ops = &intel_dma_ops;
+	init_iommu_sysfs();
 
 	register_iommu(&intel_iommu_ops);
 
@@ -2648,6 +2824,7 @@ static int vm_domain_add_dev_info(struct dmar_domain *domain,
 	if (!info)
 		return -ENOMEM;
 
+	info->segment = pci_domain_nr(pdev->bus);
 	info->bus = pdev->bus->number;
 	info->devfn = pdev->devfn;
 	info->dev = pdev;
@@ -2677,15 +2854,15 @@ static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
 	parent = pdev->bus->self;
 	while (parent != tmp) {
 		iommu_detach_dev(iommu, parent->bus->number,
 				 parent->devfn);
 		parent = parent->bus->self;
 	}
 	if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
 		iommu_detach_dev(iommu,
 			tmp->subordinate->number, 0);
 	else /* this is a legacy PCI bridge */
-		iommu_detach_dev(iommu,
-			tmp->bus->number, tmp->devfn);
+		iommu_detach_dev(iommu, tmp->bus->number,
+				 tmp->devfn);
 	}
 }
 
@@ -2698,13 +2875,15 @@ static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
 	int found = 0;
 	struct list_head *entry, *tmp;
 
-	iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
+	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
+				pdev->devfn);
 	if (!iommu)
 		return;
 
 	spin_lock_irqsave(&device_domain_lock, flags);
 	list_for_each_safe(entry, tmp, &domain->devices) {
 		info = list_entry(entry, struct device_domain_info, link);
+		/* No need to compare PCI domain; it has to be the same */
 		if (info->bus == pdev->bus->number &&
 		    info->devfn == pdev->devfn) {
 			list_del(&info->link);
@@ -2729,7 +2908,8 @@ static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
 		 * owned by this domain, clear this iommu in iommu_bmp
 		 * update iommu count and coherency
 		 */
-		if (device_to_iommu(info->bus, info->devfn) == iommu)
+		if (iommu == device_to_iommu(info->segment, info->bus,
+					     info->devfn))
 			found = 1;
 	}
 
@@ -2762,7 +2942,7 @@ static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
 
 		spin_unlock_irqrestore(&device_domain_lock, flags1);
 
-		iommu = device_to_iommu(info->bus, info->devfn);
+		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
 		iommu_detach_dev(iommu, info->bus, info->devfn);
 		iommu_detach_dependent_devices(iommu, info->dev);
 
@@ -2950,7 +3130,8 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
 		}
 	}
 
-	iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
+	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
+				pdev->devfn);
 	if (!iommu)
 		return -ENODEV;
 
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index b041a409f4a7..f5e0ea724a6f 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -9,6 +9,7 @@
 #include <asm/cpu.h>
 #include <linux/intel-iommu.h>
 #include "intr_remapping.h"
+#include <acpi/acpi.h>
 
 static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
 static int ir_ioapic_num;
@@ -415,12 +416,27 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
 
 	/* Set interrupt-remapping table pointer */
 	cmd = iommu->gcmd | DMA_GCMD_SIRTP;
+	iommu->gcmd |= DMA_GCMD_SIRTP;
 	writel(cmd, iommu->reg + DMAR_GCMD_REG);
 
 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
 		      readl, (sts & DMA_GSTS_IRTPS), sts);
 	spin_unlock_irqrestore(&iommu->register_lock, flags);
 
+	if (mode == 0) {
+		spin_lock_irqsave(&iommu->register_lock, flags);
+
+		/* enable compatibility format interrupt pass through */
+		cmd = iommu->gcmd | DMA_GCMD_CFI;
+		iommu->gcmd |= DMA_GCMD_CFI;
+		writel(cmd, iommu->reg + DMAR_GCMD_REG);
+
+		IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
+			      readl, (sts & DMA_GSTS_CFIS), sts);
+
+		spin_unlock_irqrestore(&iommu->register_lock, flags);
+	}
+
 	/*
 	 * global invalidation of interrupt entry cache before enabling
 	 * interrupt-remapping.
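The `iommu->gcmd |= ...` additions here and in the QI path all close the same hole: the global command register is written as a whole, so the driver keeps a software shadow in iommu->gcmd, and a bit that reaches hardware without also landing in the shadow is silently dropped by the next read-modify-write through the shadow. A standalone sketch of the idiom (fake registers with an instantly-acking status; the bit positions follow my recollection of the DMA_GCMD_* definitions and should be treated as assumptions):

#include <stdio.h>

#define SIRTP	(1u << 24)	/* assumed: set interrupt remap table pointer */
#define CFI	(1u << 23)	/* assumed: compatibility format interrupt */

static unsigned int hw_gcmd, hw_gsts;	/* fake command and status regs */

static void set_gcmd_bit(unsigned int *gcmd_shadow, unsigned int bit)
{
	unsigned int cmd = *gcmd_shadow | bit;

	*gcmd_shadow |= bit;	/* keep the shadow current: the fix */
	hw_gcmd = cmd;		/* ~ writel(cmd, ... DMAR_GCMD_REG) */
	hw_gsts = hw_gcmd;	/* fake hardware acks immediately */
	while (!(hw_gsts & bit))	/* ~ IOMMU_WAIT_OP() polling */
		;
}

int main(void)
{
	unsigned int gcmd = 0;

	set_gcmd_bit(&gcmd, SIRTP);
	set_gcmd_bit(&gcmd, CFI);	/* later writes keep earlier bits */
	printf("gcmd=%#x gsts=%#x\n", gcmd, hw_gsts);
	return 0;
}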
@@ -470,7 +486,7 @@ static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
 /*
  * Disable Interrupt Remapping.
  */
-static void disable_intr_remapping(struct intel_iommu *iommu)
+static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
 {
 	unsigned long flags;
 	u32 sts;
@@ -478,6 +494,12 @@ static void disable_intr_remapping(struct intel_iommu *iommu)
 	if (!ecap_ir_support(iommu->ecap))
 		return;
 
+	/*
+	 * global invalidation of interrupt entry cache before disabling
+	 * interrupt-remapping.
+	 */
+	qi_global_iec(iommu);
+
 	spin_lock_irqsave(&iommu->register_lock, flags);
 
 	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
@@ -503,6 +525,13 @@ int __init enable_intr_remapping(int eim)
 		struct intel_iommu *iommu = drhd->iommu;
 
 		/*
+		 * If the queued invalidation is already initialized,
+		 * shouldn't disable it.
+		 */
+		if (iommu->qi)
+			continue;
+
+		/*
 		 * Clear previous faults.
 		 */
 		dmar_fault(-1, iommu);
@@ -511,7 +540,7 @@ int __init enable_intr_remapping(int eim)
 		 * Disable intr remapping and queued invalidation, if already
 		 * enabled prior to OS handover.
 		 */
-		disable_intr_remapping(iommu);
+		iommu_disable_intr_remapping(iommu);
 
 		dmar_disable_qi(iommu);
 	}
@@ -639,3 +668,54 @@ int __init parse_ioapics_under_ir(void)
 
 	return ir_supported;
 }
+
+void disable_intr_remapping(void)
+{
+	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu = NULL;
+
+	/*
+	 * Disable Interrupt-remapping for all the DRHD's now.
+	 */
+	for_each_iommu(iommu, drhd) {
+		if (!ecap_ir_support(iommu->ecap))
+			continue;
+
+		iommu_disable_intr_remapping(iommu);
+	}
+}
+
+int reenable_intr_remapping(int eim)
+{
+	struct dmar_drhd_unit *drhd;
+	int setup = 0;
+	struct intel_iommu *iommu = NULL;
+
+	for_each_iommu(iommu, drhd)
+		if (iommu->qi)
+			dmar_reenable_qi(iommu);
+
+	/*
+	 * Setup Interrupt-remapping for all the DRHD's now.
+	 */
+	for_each_iommu(iommu, drhd) {
+		if (!ecap_ir_support(iommu->ecap))
+			continue;
+
+		/* Set up interrupt remapping for iommu.*/
+		iommu_set_intr_remapping(iommu, eim);
+		setup = 1;
+	}
+
+	if (!setup)
+		goto error;
+
+	return 0;
+
+error:
+	/*
+	 * handle error condition gracefully here!
+	 */
+	return -1;
+}
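To close the loop, a hedged sketch of how a platform suspend path might consume the two new entry points; the real wiring lives outside this diff, in the x86 interrupt code, and the example_* callers below are invented:

/* Hypothetical callers, for illustration only. */
static void example_platform_suspend(void)
{
	/* Quiesce interrupt remapping on every DRHD before sleeping. */
	disable_intr_remapping();
}

static int example_platform_resume(int eim)
{
	/*
	 * reenable_intr_remapping() re-arms queued invalidation first
	 * (via dmar_reenable_qi) and then reprograms each unit's table;
	 * eim selects extended interrupt mode, as at boot.
	 */
	return reenable_intr_remapping(eim);
}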