diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2018-06-08 13:44:33 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-06-08 13:44:33 -0400 |
| commit | 18f1837632783fec017fd932a812d383e3406af0 (patch) | |
| tree | f1d51043891abcccbfd5be0b13fe56ebf114108f | |
| parent | f4e70c2e5f1406e715f6359ae341e76e5004fb98 (diff) | |
| parent | 1f568357117cafb48aafb10413cbd1567dc015b1 (diff) | |
Merge tag 'iommu-updates-v4.18' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull IOMMU updates from Joerg Roedel:
"Nothing big this time. In particular:
- Debugging code for Tegra-GART
- Improvements in Intel VT-d fault printing to prevent soft-lockups
during fault storms
- Improvements in AMD IOMMU event reporting
- NUMA aware allocation in io-pgtable code for ARM
- Various other small fixes and cleanups all over the place"
* tag 'iommu-updates-v4.18' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
iommu/io-pgtable-arm: Make allocations NUMA-aware
iommu/amd: Prevent possible null pointer dereference and infinite loop
iommu/amd: Fix grammar of comments
iommu: Clean up the comments for iommu_group_alloc
iommu/vt-d: Remove unnecessary parentheses
iommu/vt-d: Clean up pasid quirk for pre-production devices
iommu/vt-d: Clean up unused variable in find_or_alloc_domain
iommu/vt-d: Fix iotlb psi missing for mappings
iommu/vt-d: Introduce __mapping_notify_one()
iommu: Remove extra NULL check when call strtobool()
iommu/amd: Update logging information for new event type
iommu/amd: Update the PASID information printed to the system log
iommu/tegra: gart: Fix gart_iommu_unmap()
iommu/tegra: gart: Add debugging facility
iommu/io-pgtable-arm: Use for_each_set_bit to simplify code
iommu/qcom: Simplify getting .drvdata
iommu: Remove depends on HAS_DMA in case of platform dependency
iommu/vt-d: Ratelimit each dmar fault printing
| -rw-r--r-- | drivers/iommu/Kconfig | 5 | ||||
| -rw-r--r-- | drivers/iommu/amd_iommu.c | 69 | ||||
| -rw-r--r-- | drivers/iommu/amd_iommu_types.h | 1 | ||||
| -rw-r--r-- | drivers/iommu/dmar.c | 8 | ||||
| -rw-r--r-- | drivers/iommu/intel-iommu.c | 101 | ||||
| -rw-r--r-- | drivers/iommu/intel-svm.c | 2 | ||||
| -rw-r--r-- | drivers/iommu/io-pgtable-arm-v7s.c | 5 | ||||
| -rw-r--r-- | drivers/iommu/io-pgtable-arm.c | 18 | ||||
| -rw-r--r-- | drivers/iommu/iommu.c | 7 | ||||
| -rw-r--r-- | drivers/iommu/qcom_iommu.c | 6 | ||||
| -rw-r--r-- | drivers/iommu/tegra-gart.c | 15 | ||||
| -rw-r--r-- | include/linux/intel-iommu.h | 1 |
12 files changed, 122 insertions, 116 deletions
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 5b714a062fa7..8ea77efb2e29 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig | |||
| @@ -23,7 +23,7 @@ config IOMMU_IO_PGTABLE | |||
| 23 | config IOMMU_IO_PGTABLE_LPAE | 23 | config IOMMU_IO_PGTABLE_LPAE |
| 24 | bool "ARMv7/v8 Long Descriptor Format" | 24 | bool "ARMv7/v8 Long Descriptor Format" |
| 25 | select IOMMU_IO_PGTABLE | 25 | select IOMMU_IO_PGTABLE |
| 26 | depends on HAS_DMA && (ARM || ARM64 || (COMPILE_TEST && !GENERIC_ATOMIC64)) | 26 | depends on ARM || ARM64 || (COMPILE_TEST && !GENERIC_ATOMIC64) |
| 27 | help | 27 | help |
| 28 | Enable support for the ARM long descriptor pagetable format. | 28 | Enable support for the ARM long descriptor pagetable format. |
| 29 | This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page | 29 | This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page |
| @@ -42,7 +42,7 @@ config IOMMU_IO_PGTABLE_LPAE_SELFTEST | |||
| 42 | config IOMMU_IO_PGTABLE_ARMV7S | 42 | config IOMMU_IO_PGTABLE_ARMV7S |
| 43 | bool "ARMv7/v8 Short Descriptor Format" | 43 | bool "ARMv7/v8 Short Descriptor Format" |
| 44 | select IOMMU_IO_PGTABLE | 44 | select IOMMU_IO_PGTABLE |
| 45 | depends on HAS_DMA && (ARM || ARM64 || COMPILE_TEST) | 45 | depends on ARM || ARM64 || COMPILE_TEST |
| 46 | help | 46 | help |
| 47 | Enable support for the ARM Short-descriptor pagetable format. | 47 | Enable support for the ARM Short-descriptor pagetable format. |
| 48 | This supports 32-bit virtual and physical addresses mapped using | 48 | This supports 32-bit virtual and physical addresses mapped using |
| @@ -377,7 +377,6 @@ config QCOM_IOMMU | |||
| 377 | # Note: iommu drivers cannot (yet?) be built as modules | 377 | # Note: iommu drivers cannot (yet?) be built as modules |
| 378 | bool "Qualcomm IOMMU Support" | 378 | bool "Qualcomm IOMMU Support" |
| 379 | depends on ARCH_QCOM || (COMPILE_TEST && !GENERIC_ATOMIC64) | 379 | depends on ARCH_QCOM || (COMPILE_TEST && !GENERIC_ATOMIC64) |
| 380 | depends on HAS_DMA | ||
| 381 | select IOMMU_API | 380 | select IOMMU_API |
| 382 | select IOMMU_IO_PGTABLE_LPAE | 381 | select IOMMU_IO_PGTABLE_LPAE |
| 383 | select ARM_DMA_USE_IOMMU | 382 | select ARM_DMA_USE_IOMMU |
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index d60c7dc62905..1912e9106fbe 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c | |||
| @@ -547,7 +547,7 @@ static void amd_iommu_report_page_fault(u16 devid, u16 domain_id, | |||
| 547 | static void iommu_print_event(struct amd_iommu *iommu, void *__evt) | 547 | static void iommu_print_event(struct amd_iommu *iommu, void *__evt) |
| 548 | { | 548 | { |
| 549 | struct device *dev = iommu->iommu.dev; | 549 | struct device *dev = iommu->iommu.dev; |
| 550 | int type, devid, domid, flags; | 550 | int type, devid, pasid, flags, tag; |
| 551 | volatile u32 *event = __evt; | 551 | volatile u32 *event = __evt; |
| 552 | int count = 0; | 552 | int count = 0; |
| 553 | u64 address; | 553 | u64 address; |
| @@ -555,7 +555,7 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt) | |||
| 555 | retry: | 555 | retry: |
| 556 | type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK; | 556 | type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK; |
| 557 | devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK; | 557 | devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK; |
| 558 | domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK; | 558 | pasid = PPR_PASID(*(u64 *)&event[0]); |
| 559 | flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK; | 559 | flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK; |
| 560 | address = (u64)(((u64)event[3]) << 32) | event[2]; | 560 | address = (u64)(((u64)event[3]) << 32) | event[2]; |
| 561 | 561 | ||
| @@ -570,7 +570,7 @@ retry: | |||
| 570 | } | 570 | } |
| 571 | 571 | ||
| 572 | if (type == EVENT_TYPE_IO_FAULT) { | 572 | if (type == EVENT_TYPE_IO_FAULT) { |
| 573 | amd_iommu_report_page_fault(devid, domid, address, flags); | 573 | amd_iommu_report_page_fault(devid, pasid, address, flags); |
| 574 | return; | 574 | return; |
| 575 | } else { | 575 | } else { |
| 576 | dev_err(dev, "AMD-Vi: Event logged ["); | 576 | dev_err(dev, "AMD-Vi: Event logged ["); |
| @@ -578,10 +578,9 @@ retry: | |||
| 578 | 578 | ||
| 579 | switch (type) { | 579 | switch (type) { |
| 580 | case EVENT_TYPE_ILL_DEV: | 580 | case EVENT_TYPE_ILL_DEV: |
| 581 | dev_err(dev, "ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x " | 581 | dev_err(dev, "ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x pasid=0x%05x address=0x%016llx flags=0x%04x]\n", |
| 582 | "address=0x%016llx flags=0x%04x]\n", | ||
| 583 | PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), | 582 | PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), |
| 584 | address, flags); | 583 | pasid, address, flags); |
| 585 | dump_dte_entry(devid); | 584 | dump_dte_entry(devid); |
| 586 | break; | 585 | break; |
| 587 | case EVENT_TYPE_DEV_TAB_ERR: | 586 | case EVENT_TYPE_DEV_TAB_ERR: |
| @@ -591,34 +590,38 @@ retry: | |||
| 591 | address, flags); | 590 | address, flags); |
| 592 | break; | 591 | break; |
| 593 | case EVENT_TYPE_PAGE_TAB_ERR: | 592 | case EVENT_TYPE_PAGE_TAB_ERR: |
| 594 | dev_err(dev, "PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x " | 593 | dev_err(dev, "PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x domain=0x%04x address=0x%016llx flags=0x%04x]\n", |
| 595 | "domain=0x%04x address=0x%016llx flags=0x%04x]\n", | ||
| 596 | PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), | 594 | PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), |
| 597 | domid, address, flags); | 595 | pasid, address, flags); |
| 598 | break; | 596 | break; |
| 599 | case EVENT_TYPE_ILL_CMD: | 597 | case EVENT_TYPE_ILL_CMD: |
| 600 | dev_err(dev, "ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address); | 598 | dev_err(dev, "ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address); |
| 601 | dump_command(address); | 599 | dump_command(address); |
| 602 | break; | 600 | break; |
| 603 | case EVENT_TYPE_CMD_HARD_ERR: | 601 | case EVENT_TYPE_CMD_HARD_ERR: |
| 604 | dev_err(dev, "COMMAND_HARDWARE_ERROR address=0x%016llx " | 602 | dev_err(dev, "COMMAND_HARDWARE_ERROR address=0x%016llx flags=0x%04x]\n", |
| 605 | "flags=0x%04x]\n", address, flags); | 603 | address, flags); |
| 606 | break; | 604 | break; |
| 607 | case EVENT_TYPE_IOTLB_INV_TO: | 605 | case EVENT_TYPE_IOTLB_INV_TO: |
| 608 | dev_err(dev, "IOTLB_INV_TIMEOUT device=%02x:%02x.%x " | 606 | dev_err(dev, "IOTLB_INV_TIMEOUT device=%02x:%02x.%x address=0x%016llx]\n", |
| 609 | "address=0x%016llx]\n", | ||
| 610 | PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), | 607 | PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), |
| 611 | address); | 608 | address); |
| 612 | break; | 609 | break; |
| 613 | case EVENT_TYPE_INV_DEV_REQ: | 610 | case EVENT_TYPE_INV_DEV_REQ: |
| 614 | dev_err(dev, "INVALID_DEVICE_REQUEST device=%02x:%02x.%x " | 611 | dev_err(dev, "INVALID_DEVICE_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%016llx flags=0x%04x]\n", |
| 615 | "address=0x%016llx flags=0x%04x]\n", | ||
| 616 | PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), | 612 | PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), |
| 617 | address, flags); | 613 | pasid, address, flags); |
| 614 | break; | ||
| 615 | case EVENT_TYPE_INV_PPR_REQ: | ||
| 616 | pasid = ((event[0] >> 16) & 0xFFFF) | ||
| 617 | | ((event[1] << 6) & 0xF0000); | ||
| 618 | tag = event[1] & 0x03FF; | ||
| 619 | dev_err(dev, "INVALID_PPR_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%016llx flags=0x%04x]\n", | ||
| 620 | PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), | ||
| 621 | pasid, address, flags); | ||
| 618 | break; | 622 | break; |
| 619 | default: | 623 | default: |
| 620 | dev_err(dev, KERN_ERR "UNKNOWN event[0]=0x%08x event[1]=0x%08x " | 624 | dev_err(dev, "UNKNOWN event[0]=0x%08x event[1]=0x%08x event[2]=0x%08x event[3]=0x%08x\n", |
| 621 | "event[2]=0x%08x event[3]=0x%08x\n", | ||
| 622 | event[0], event[1], event[2], event[3]); | 625 | event[0], event[1], event[2], event[3]); |
| 623 | } | 626 | } |
| 624 | 627 | ||
| @@ -1914,15 +1917,6 @@ static void do_detach(struct iommu_dev_data *dev_data) | |||
| 1914 | struct amd_iommu *iommu; | 1917 | struct amd_iommu *iommu; |
| 1915 | u16 alias; | 1918 | u16 alias; |
| 1916 | 1919 | ||
| 1917 | /* | ||
| 1918 | * First check if the device is still attached. It might already | ||
| 1919 | * be detached from its domain because the generic | ||
| 1920 | * iommu_detach_group code detached it and we try again here in | ||
| 1921 | * our alias handling. | ||
| 1922 | */ | ||
| 1923 | if (!dev_data->domain) | ||
| 1924 | return; | ||
| 1925 | |||
| 1926 | iommu = amd_iommu_rlookup_table[dev_data->devid]; | 1920 | iommu = amd_iommu_rlookup_table[dev_data->devid]; |
| 1927 | alias = dev_data->alias; | 1921 | alias = dev_data->alias; |
| 1928 | 1922 | ||
| @@ -1942,8 +1936,8 @@ static void do_detach(struct iommu_dev_data *dev_data) | |||
| 1942 | } | 1936 | } |
| 1943 | 1937 | ||
| 1944 | /* | 1938 | /* |
| 1945 | * If a device is not yet associated with a domain, this function does | 1939 | * If a device is not yet associated with a domain, this function makes the |
| 1946 | * assigns it visible for the hardware | 1940 | * device visible in the domain |
| 1947 | */ | 1941 | */ |
| 1948 | static int __attach_device(struct iommu_dev_data *dev_data, | 1942 | static int __attach_device(struct iommu_dev_data *dev_data, |
| 1949 | struct protection_domain *domain) | 1943 | struct protection_domain *domain) |
| @@ -2064,8 +2058,8 @@ static bool pci_pri_tlp_required(struct pci_dev *pdev) | |||
| 2064 | } | 2058 | } |
| 2065 | 2059 | ||
| 2066 | /* | 2060 | /* |
| 2067 | * If a device is not yet associated with a domain, this function | 2061 | * If a device is not yet associated with a domain, this function makes the |
| 2068 | * assigns it visible for the hardware | 2062 | * device visible in the domain |
| 2069 | */ | 2063 | */ |
| 2070 | static int attach_device(struct device *dev, | 2064 | static int attach_device(struct device *dev, |
| 2071 | struct protection_domain *domain) | 2065 | struct protection_domain *domain) |
| @@ -2127,9 +2121,6 @@ static void __detach_device(struct iommu_dev_data *dev_data) | |||
| 2127 | */ | 2121 | */ |
| 2128 | WARN_ON(!irqs_disabled()); | 2122 | WARN_ON(!irqs_disabled()); |
| 2129 | 2123 | ||
| 2130 | if (WARN_ON(!dev_data->domain)) | ||
| 2131 | return; | ||
| 2132 | |||
| 2133 | domain = dev_data->domain; | 2124 | domain = dev_data->domain; |
| 2134 | 2125 | ||
| 2135 | spin_lock(&domain->lock); | 2126 | spin_lock(&domain->lock); |
| @@ -2151,6 +2142,15 @@ static void detach_device(struct device *dev) | |||
| 2151 | dev_data = get_dev_data(dev); | 2142 | dev_data = get_dev_data(dev); |
| 2152 | domain = dev_data->domain; | 2143 | domain = dev_data->domain; |
| 2153 | 2144 | ||
| 2145 | /* | ||
| 2146 | * First check if the device is still attached. It might already | ||
| 2147 | * be detached from its domain because the generic | ||
| 2148 | * iommu_detach_group code detached it and we try again here in | ||
| 2149 | * our alias handling. | ||
| 2150 | */ | ||
| 2151 | if (WARN_ON(!dev_data->domain)) | ||
| 2152 | return; | ||
| 2153 | |||
| 2154 | /* lock device table */ | 2154 | /* lock device table */ |
| 2155 | spin_lock_irqsave(&amd_iommu_devtable_lock, flags); | 2155 | spin_lock_irqsave(&amd_iommu_devtable_lock, flags); |
| 2156 | __detach_device(dev_data); | 2156 | __detach_device(dev_data); |
| @@ -2796,6 +2796,7 @@ static void cleanup_domain(struct protection_domain *domain) | |||
| 2796 | while (!list_empty(&domain->dev_list)) { | 2796 | while (!list_empty(&domain->dev_list)) { |
| 2797 | entry = list_first_entry(&domain->dev_list, | 2797 | entry = list_first_entry(&domain->dev_list, |
| 2798 | struct iommu_dev_data, list); | 2798 | struct iommu_dev_data, list); |
| 2799 | BUG_ON(!entry->domain); | ||
| 2799 | __detach_device(entry); | 2800 | __detach_device(entry); |
| 2800 | } | 2801 | } |
| 2801 | 2802 | ||
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index 1c9b080276c9..986cbe0cc189 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h | |||
| @@ -133,6 +133,7 @@ | |||
| 133 | #define EVENT_TYPE_CMD_HARD_ERR 0x6 | 133 | #define EVENT_TYPE_CMD_HARD_ERR 0x6 |
| 134 | #define EVENT_TYPE_IOTLB_INV_TO 0x7 | 134 | #define EVENT_TYPE_IOTLB_INV_TO 0x7 |
| 135 | #define EVENT_TYPE_INV_DEV_REQ 0x8 | 135 | #define EVENT_TYPE_INV_DEV_REQ 0x8 |
| 136 | #define EVENT_TYPE_INV_PPR_REQ 0x9 | ||
| 136 | #define EVENT_DEVID_MASK 0xffff | 137 | #define EVENT_DEVID_MASK 0xffff |
| 137 | #define EVENT_DEVID_SHIFT 0 | 138 | #define EVENT_DEVID_SHIFT 0 |
| 138 | #define EVENT_DOMID_MASK 0xffff | 139 | #define EVENT_DOMID_MASK 0xffff |
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index 460bed4fc5b1..4321f7704b23 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c | |||
| @@ -1618,17 +1618,13 @@ irqreturn_t dmar_fault(int irq, void *dev_id) | |||
| 1618 | int reg, fault_index; | 1618 | int reg, fault_index; |
| 1619 | u32 fault_status; | 1619 | u32 fault_status; |
| 1620 | unsigned long flag; | 1620 | unsigned long flag; |
| 1621 | bool ratelimited; | ||
| 1622 | static DEFINE_RATELIMIT_STATE(rs, | 1621 | static DEFINE_RATELIMIT_STATE(rs, |
| 1623 | DEFAULT_RATELIMIT_INTERVAL, | 1622 | DEFAULT_RATELIMIT_INTERVAL, |
| 1624 | DEFAULT_RATELIMIT_BURST); | 1623 | DEFAULT_RATELIMIT_BURST); |
| 1625 | 1624 | ||
| 1626 | /* Disable printing, simply clear the fault when ratelimited */ | ||
| 1627 | ratelimited = !__ratelimit(&rs); | ||
| 1628 | |||
| 1629 | raw_spin_lock_irqsave(&iommu->register_lock, flag); | 1625 | raw_spin_lock_irqsave(&iommu->register_lock, flag); |
| 1630 | fault_status = readl(iommu->reg + DMAR_FSTS_REG); | 1626 | fault_status = readl(iommu->reg + DMAR_FSTS_REG); |
| 1631 | if (fault_status && !ratelimited) | 1627 | if (fault_status && __ratelimit(&rs)) |
| 1632 | pr_err("DRHD: handling fault status reg %x\n", fault_status); | 1628 | pr_err("DRHD: handling fault status reg %x\n", fault_status); |
| 1633 | 1629 | ||
| 1634 | /* TBD: ignore advanced fault log currently */ | 1630 | /* TBD: ignore advanced fault log currently */ |
| @@ -1638,6 +1634,8 @@ irqreturn_t dmar_fault(int irq, void *dev_id) | |||
| 1638 | fault_index = dma_fsts_fault_record_index(fault_status); | 1634 | fault_index = dma_fsts_fault_record_index(fault_status); |
| 1639 | reg = cap_fault_reg_offset(iommu->cap); | 1635 | reg = cap_fault_reg_offset(iommu->cap); |
| 1640 | while (1) { | 1636 | while (1) { |
| 1637 | /* Disable printing, simply clear the fault when ratelimited */ | ||
| 1638 | bool ratelimited = !__ratelimit(&rs); | ||
| 1641 | u8 fault_reason; | 1639 | u8 fault_reason; |
| 1642 | u16 source_id; | 1640 | u16 source_id; |
| 1643 | u64 guest_addr; | 1641 | u64 guest_addr; |
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 772b404a6604..89e49a429c57 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
| @@ -485,37 +485,14 @@ static int dmar_forcedac; | |||
| 485 | static int intel_iommu_strict; | 485 | static int intel_iommu_strict; |
| 486 | static int intel_iommu_superpage = 1; | 486 | static int intel_iommu_superpage = 1; |
| 487 | static int intel_iommu_ecs = 1; | 487 | static int intel_iommu_ecs = 1; |
| 488 | static int intel_iommu_pasid28; | ||
| 489 | static int iommu_identity_mapping; | 488 | static int iommu_identity_mapping; |
| 490 | 489 | ||
| 491 | #define IDENTMAP_ALL 1 | 490 | #define IDENTMAP_ALL 1 |
| 492 | #define IDENTMAP_GFX 2 | 491 | #define IDENTMAP_GFX 2 |
| 493 | #define IDENTMAP_AZALIA 4 | 492 | #define IDENTMAP_AZALIA 4 |
| 494 | 493 | ||
| 495 | /* Broadwell and Skylake have broken ECS support — normal so-called "second | 494 | #define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap)) |
| 496 | * level" translation of DMA requests-without-PASID doesn't actually happen | 495 | #define pasid_enabled(iommu) (ecs_enabled(iommu) && ecap_pasid(iommu->ecap)) |
| 497 | * unless you also set the NESTE bit in an extended context-entry. Which of | ||
| 498 | * course means that SVM doesn't work because it's trying to do nested | ||
| 499 | * translation of the physical addresses it finds in the process page tables, | ||
| 500 | * through the IOVA->phys mapping found in the "second level" page tables. | ||
| 501 | * | ||
| 502 | * The VT-d specification was retroactively changed to change the definition | ||
| 503 | * of the capability bits and pretend that Broadwell/Skylake never happened... | ||
| 504 | * but unfortunately the wrong bit was changed. It's ECS which is broken, but | ||
| 505 | * for some reason it was the PASID capability bit which was redefined (from | ||
| 506 | * bit 28 on BDW/SKL to bit 40 in future). | ||
| 507 | * | ||
| 508 | * So our test for ECS needs to eschew those implementations which set the old | ||
| 509 | * PASID capabiity bit 28, since those are the ones on which ECS is broken. | ||
| 510 | * Unless we are working around the 'pasid28' limitations, that is, by putting | ||
| 511 | * the device into passthrough mode for normal DMA and thus masking the bug. | ||
| 512 | */ | ||
| 513 | #define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \ | ||
| 514 | (intel_iommu_pasid28 || !ecap_broken_pasid(iommu->ecap))) | ||
| 515 | /* PASID support is thus enabled if ECS is enabled and *either* of the old | ||
| 516 | * or new capability bits are set. */ | ||
| 517 | #define pasid_enabled(iommu) (ecs_enabled(iommu) && \ | ||
| 518 | (ecap_pasid(iommu->ecap) || ecap_broken_pasid(iommu->ecap))) | ||
| 519 | 496 | ||
| 520 | int intel_iommu_gfx_mapped; | 497 | int intel_iommu_gfx_mapped; |
| 521 | EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped); | 498 | EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped); |
| @@ -578,11 +555,6 @@ static int __init intel_iommu_setup(char *str) | |||
| 578 | printk(KERN_INFO | 555 | printk(KERN_INFO |
| 579 | "Intel-IOMMU: disable extended context table support\n"); | 556 | "Intel-IOMMU: disable extended context table support\n"); |
| 580 | intel_iommu_ecs = 0; | 557 | intel_iommu_ecs = 0; |
| 581 | } else if (!strncmp(str, "pasid28", 7)) { | ||
| 582 | printk(KERN_INFO | ||
| 583 | "Intel-IOMMU: enable pre-production PASID support\n"); | ||
| 584 | intel_iommu_pasid28 = 1; | ||
| 585 | iommu_identity_mapping |= IDENTMAP_GFX; | ||
| 586 | } else if (!strncmp(str, "tboot_noforce", 13)) { | 558 | } else if (!strncmp(str, "tboot_noforce", 13)) { |
| 587 | printk(KERN_INFO | 559 | printk(KERN_INFO |
| 588 | "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n"); | 560 | "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n"); |
| @@ -1606,6 +1578,18 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, | |||
| 1606 | iommu_flush_dev_iotlb(domain, addr, mask); | 1578 | iommu_flush_dev_iotlb(domain, addr, mask); |
| 1607 | } | 1579 | } |
| 1608 | 1580 | ||
| 1581 | /* Notification for newly created mappings */ | ||
| 1582 | static inline void __mapping_notify_one(struct intel_iommu *iommu, | ||
| 1583 | struct dmar_domain *domain, | ||
| 1584 | unsigned long pfn, unsigned int pages) | ||
| 1585 | { | ||
| 1586 | /* It's a non-present to present mapping. Only flush if caching mode */ | ||
| 1587 | if (cap_caching_mode(iommu->cap)) | ||
| 1588 | iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1); | ||
| 1589 | else | ||
| 1590 | iommu_flush_write_buffer(iommu); | ||
| 1591 | } | ||
| 1592 | |||
| 1609 | static void iommu_flush_iova(struct iova_domain *iovad) | 1593 | static void iommu_flush_iova(struct iova_domain *iovad) |
| 1610 | { | 1594 | { |
| 1611 | struct dmar_domain *domain; | 1595 | struct dmar_domain *domain; |
| @@ -2340,18 +2324,47 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, | |||
| 2340 | return 0; | 2324 | return 0; |
| 2341 | } | 2325 | } |
| 2342 | 2326 | ||
| 2327 | static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, | ||
| 2328 | struct scatterlist *sg, unsigned long phys_pfn, | ||
| 2329 | unsigned long nr_pages, int prot) | ||
| 2330 | { | ||
| 2331 | int ret; | ||
| 2332 | struct intel_iommu *iommu; | ||
| 2333 | |||
| 2334 | /* Do the real mapping first */ | ||
| 2335 | ret = __domain_mapping(domain, iov_pfn, sg, phys_pfn, nr_pages, prot); | ||
| 2336 | if (ret) | ||
| 2337 | return ret; | ||
| 2338 | |||
| 2339 | /* Notify about the new mapping */ | ||
| 2340 | if (domain_type_is_vm(domain)) { | ||
| 2341 | /* VM typed domains can have more than one IOMMUs */ | ||
| 2342 | int iommu_id; | ||
| 2343 | for_each_domain_iommu(iommu_id, domain) { | ||
| 2344 | iommu = g_iommus[iommu_id]; | ||
| 2345 | __mapping_notify_one(iommu, domain, iov_pfn, nr_pages); | ||
| 2346 | } | ||
| 2347 | } else { | ||
| 2348 | /* General domains only have one IOMMU */ | ||
| 2349 | iommu = domain_get_iommu(domain); | ||
| 2350 | __mapping_notify_one(iommu, domain, iov_pfn, nr_pages); | ||
| 2351 | } | ||
| 2352 | |||
| 2353 | return 0; | ||
| 2354 | } | ||
| 2355 | |||
| 2343 | static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn, | 2356 | static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn, |
| 2344 | struct scatterlist *sg, unsigned long nr_pages, | 2357 | struct scatterlist *sg, unsigned long nr_pages, |
| 2345 | int prot) | 2358 | int prot) |
| 2346 | { | 2359 | { |
| 2347 | return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot); | 2360 | return domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot); |
| 2348 | } | 2361 | } |
| 2349 | 2362 | ||
| 2350 | static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn, | 2363 | static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn, |
| 2351 | unsigned long phys_pfn, unsigned long nr_pages, | 2364 | unsigned long phys_pfn, unsigned long nr_pages, |
| 2352 | int prot) | 2365 | int prot) |
| 2353 | { | 2366 | { |
| 2354 | return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot); | 2367 | return domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot); |
| 2355 | } | 2368 | } |
| 2356 | 2369 | ||
| 2357 | static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn) | 2370 | static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn) |
| @@ -2534,7 +2547,7 @@ static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw) | |||
| 2534 | struct device_domain_info *info = NULL; | 2547 | struct device_domain_info *info = NULL; |
| 2535 | struct dmar_domain *domain = NULL; | 2548 | struct dmar_domain *domain = NULL; |
| 2536 | struct intel_iommu *iommu; | 2549 | struct intel_iommu *iommu; |
| 2537 | u16 req_id, dma_alias; | 2550 | u16 dma_alias; |
| 2538 | unsigned long flags; | 2551 | unsigned long flags; |
| 2539 | u8 bus, devfn; | 2552 | u8 bus, devfn; |
| 2540 | 2553 | ||
| @@ -2542,8 +2555,6 @@ static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw) | |||
| 2542 | if (!iommu) | 2555 | if (!iommu) |
| 2543 | return NULL; | 2556 | return NULL; |
| 2544 | 2557 | ||
| 2545 | req_id = ((u16)bus << 8) | devfn; | ||
| 2546 | |||
| 2547 | if (dev_is_pci(dev)) { | 2558 | if (dev_is_pci(dev)) { |
| 2548 | struct pci_dev *pdev = to_pci_dev(dev); | 2559 | struct pci_dev *pdev = to_pci_dev(dev); |
| 2549 | 2560 | ||
| @@ -2657,9 +2668,9 @@ static int iommu_domain_identity_map(struct dmar_domain *domain, | |||
| 2657 | */ | 2668 | */ |
| 2658 | dma_pte_clear_range(domain, first_vpfn, last_vpfn); | 2669 | dma_pte_clear_range(domain, first_vpfn, last_vpfn); |
| 2659 | 2670 | ||
| 2660 | return domain_pfn_mapping(domain, first_vpfn, first_vpfn, | 2671 | return __domain_mapping(domain, first_vpfn, NULL, |
| 2661 | last_vpfn - first_vpfn + 1, | 2672 | first_vpfn, last_vpfn - first_vpfn + 1, |
| 2662 | DMA_PTE_READ|DMA_PTE_WRITE); | 2673 | DMA_PTE_READ|DMA_PTE_WRITE); |
| 2663 | } | 2674 | } |
| 2664 | 2675 | ||
| 2665 | static int domain_prepare_identity_map(struct device *dev, | 2676 | static int domain_prepare_identity_map(struct device *dev, |
| @@ -3626,14 +3637,6 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr, | |||
| 3626 | if (ret) | 3637 | if (ret) |
| 3627 | goto error; | 3638 | goto error; |
| 3628 | 3639 | ||
| 3629 | /* it's a non-present to present mapping. Only flush if caching mode */ | ||
| 3630 | if (cap_caching_mode(iommu->cap)) | ||
| 3631 | iommu_flush_iotlb_psi(iommu, domain, | ||
| 3632 | mm_to_dma_pfn(iova_pfn), | ||
| 3633 | size, 0, 1); | ||
| 3634 | else | ||
| 3635 | iommu_flush_write_buffer(iommu); | ||
| 3636 | |||
| 3637 | start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT; | 3640 | start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT; |
| 3638 | start_paddr += paddr & ~PAGE_MASK; | 3641 | start_paddr += paddr & ~PAGE_MASK; |
| 3639 | return start_paddr; | 3642 | return start_paddr; |
| @@ -3820,12 +3823,6 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele | |||
| 3820 | return 0; | 3823 | return 0; |
| 3821 | } | 3824 | } |
| 3822 | 3825 | ||
| 3823 | /* it's a non-present to present mapping. Only flush if caching mode */ | ||
| 3824 | if (cap_caching_mode(iommu->cap)) | ||
| 3825 | iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1); | ||
| 3826 | else | ||
| 3827 | iommu_flush_write_buffer(iommu); | ||
| 3828 | |||
| 3829 | return nelems; | 3826 | return nelems; |
| 3830 | } | 3827 | } |
| 3831 | 3828 | ||
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c index e8cd984cf9c8..45f6e581cd56 100644 --- a/drivers/iommu/intel-svm.c +++ b/drivers/iommu/intel-svm.c | |||
| @@ -319,7 +319,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ | |||
| 319 | } else | 319 | } else |
| 320 | pasid_max = 1 << 20; | 320 | pasid_max = 1 << 20; |
| 321 | 321 | ||
| 322 | if ((flags & SVM_FLAG_SUPERVISOR_MODE)) { | 322 | if (flags & SVM_FLAG_SUPERVISOR_MODE) { |
| 323 | if (!ecap_srs(iommu->ecap)) | 323 | if (!ecap_srs(iommu->ecap)) |
| 324 | return -EINVAL; | 324 | return -EINVAL; |
| 325 | } else if (pasid) { | 325 | } else if (pasid) { |
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index 10e4a3d11c02..50e3a9fcf43e 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c | |||
| @@ -898,8 +898,7 @@ static int __init arm_v7s_do_selftests(void) | |||
| 898 | 898 | ||
| 899 | /* Full unmap */ | 899 | /* Full unmap */ |
| 900 | iova = 0; | 900 | iova = 0; |
| 901 | i = find_first_bit(&cfg.pgsize_bitmap, BITS_PER_LONG); | 901 | for_each_set_bit(i, &cfg.pgsize_bitmap, BITS_PER_LONG) { |
| 902 | while (i != BITS_PER_LONG) { | ||
| 903 | size = 1UL << i; | 902 | size = 1UL << i; |
| 904 | 903 | ||
| 905 | if (ops->unmap(ops, iova, size) != size) | 904 | if (ops->unmap(ops, iova, size) != size) |
| @@ -916,8 +915,6 @@ static int __init arm_v7s_do_selftests(void) | |||
| 916 | return __FAIL(ops); | 915 | return __FAIL(ops); |
| 917 | 916 | ||
| 918 | iova += SZ_16M; | 917 | iova += SZ_16M; |
| 919 | i++; | ||
| 920 | i = find_next_bit(&cfg.pgsize_bitmap, BITS_PER_LONG, i); | ||
| 921 | } | 918 | } |
| 922 | 919 | ||
| 923 | free_io_pgtable_ops(ops); | 920 | free_io_pgtable_ops(ops); |
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 39c2a056da21..010a254305dd 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c | |||
| @@ -231,12 +231,17 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp, | |||
| 231 | struct io_pgtable_cfg *cfg) | 231 | struct io_pgtable_cfg *cfg) |
| 232 | { | 232 | { |
| 233 | struct device *dev = cfg->iommu_dev; | 233 | struct device *dev = cfg->iommu_dev; |
| 234 | int order = get_order(size); | ||
| 235 | struct page *p; | ||
| 234 | dma_addr_t dma; | 236 | dma_addr_t dma; |
| 235 | void *pages = alloc_pages_exact(size, gfp | __GFP_ZERO); | 237 | void *pages; |
| 236 | 238 | ||
| 237 | if (!pages) | 239 | VM_BUG_ON((gfp & __GFP_HIGHMEM)); |
| 240 | p = alloc_pages_node(dev_to_node(dev), gfp | __GFP_ZERO, order); | ||
| 241 | if (!p) | ||
| 238 | return NULL; | 242 | return NULL; |
| 239 | 243 | ||
| 244 | pages = page_address(p); | ||
| 240 | if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) { | 245 | if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) { |
| 241 | dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE); | 246 | dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE); |
| 242 | if (dma_mapping_error(dev, dma)) | 247 | if (dma_mapping_error(dev, dma)) |
| @@ -256,7 +261,7 @@ out_unmap: | |||
| 256 | dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n"); | 261 | dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n"); |
| 257 | dma_unmap_single(dev, dma, size, DMA_TO_DEVICE); | 262 | dma_unmap_single(dev, dma, size, DMA_TO_DEVICE); |
| 258 | out_free: | 263 | out_free: |
| 259 | free_pages_exact(pages, size); | 264 | __free_pages(p, order); |
| 260 | return NULL; | 265 | return NULL; |
| 261 | } | 266 | } |
| 262 | 267 | ||
| @@ -266,7 +271,7 @@ static void __arm_lpae_free_pages(void *pages, size_t size, | |||
| 266 | if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) | 271 | if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) |
| 267 | dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages), | 272 | dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages), |
| 268 | size, DMA_TO_DEVICE); | 273 | size, DMA_TO_DEVICE); |
| 269 | free_pages_exact(pages, size); | 274 | free_pages((unsigned long)pages, get_order(size)); |
| 270 | } | 275 | } |
| 271 | 276 | ||
| 272 | static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, | 277 | static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, |
| @@ -1120,8 +1125,7 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) | |||
| 1120 | 1125 | ||
| 1121 | /* Full unmap */ | 1126 | /* Full unmap */ |
| 1122 | iova = 0; | 1127 | iova = 0; |
| 1123 | j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG); | 1128 | for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) { |
| 1124 | while (j != BITS_PER_LONG) { | ||
| 1125 | size = 1UL << j; | 1129 | size = 1UL << j; |
| 1126 | 1130 | ||
| 1127 | if (ops->unmap(ops, iova, size) != size) | 1131 | if (ops->unmap(ops, iova, size) != size) |
| @@ -1138,8 +1142,6 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) | |||
| 1138 | return __FAIL(ops, i); | 1142 | return __FAIL(ops, i); |
| 1139 | 1143 | ||
| 1140 | iova += SZ_1G; | 1144 | iova += SZ_1G; |
| 1141 | j++; | ||
| 1142 | j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j); | ||
| 1143 | } | 1145 | } |
| 1144 | 1146 | ||
| 1145 | free_io_pgtable_ops(ops); | 1147 | free_io_pgtable_ops(ops); |
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index d2aa23202bb9..63b37563db7e 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c | |||
| @@ -116,9 +116,11 @@ static void __iommu_detach_group(struct iommu_domain *domain, | |||
| 116 | static int __init iommu_set_def_domain_type(char *str) | 116 | static int __init iommu_set_def_domain_type(char *str) |
| 117 | { | 117 | { |
| 118 | bool pt; | 118 | bool pt; |
| 119 | int ret; | ||
| 119 | 120 | ||
| 120 | if (!str || strtobool(str, &pt)) | 121 | ret = kstrtobool(str, &pt); |
| 121 | return -EINVAL; | 122 | if (ret) |
| 123 | return ret; | ||
| 122 | 124 | ||
| 123 | iommu_def_domain_type = pt ? IOMMU_DOMAIN_IDENTITY : IOMMU_DOMAIN_DMA; | 125 | iommu_def_domain_type = pt ? IOMMU_DOMAIN_IDENTITY : IOMMU_DOMAIN_DMA; |
| 124 | return 0; | 126 | return 0; |
| @@ -322,7 +324,6 @@ static struct kobj_type iommu_group_ktype = { | |||
| 322 | 324 | ||
| 323 | /** | 325 | /** |
| 324 | * iommu_group_alloc - Allocate a new group | 326 | * iommu_group_alloc - Allocate a new group |
| 325 | * @name: Optional name to associate with group, visible in sysfs | ||
| 326 | * | 327 | * |
| 327 | * This function is called by an iommu driver to allocate a new iommu | 328 | * This function is called by an iommu driver to allocate a new iommu |
| 328 | * group. The iommu group represents the minimum granularity of the iommu. | 329 | * group. The iommu group represents the minimum granularity of the iommu. |
diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c index 65b9c99707f8..fe88a4880d3a 100644 --- a/drivers/iommu/qcom_iommu.c +++ b/drivers/iommu/qcom_iommu.c | |||
| @@ -885,16 +885,14 @@ static int qcom_iommu_device_remove(struct platform_device *pdev) | |||
| 885 | 885 | ||
| 886 | static int __maybe_unused qcom_iommu_resume(struct device *dev) | 886 | static int __maybe_unused qcom_iommu_resume(struct device *dev) |
| 887 | { | 887 | { |
| 888 | struct platform_device *pdev = to_platform_device(dev); | 888 | struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev); |
| 889 | struct qcom_iommu_dev *qcom_iommu = platform_get_drvdata(pdev); | ||
| 890 | 889 | ||
| 891 | return qcom_iommu_enable_clocks(qcom_iommu); | 890 | return qcom_iommu_enable_clocks(qcom_iommu); |
| 892 | } | 891 | } |
| 893 | 892 | ||
| 894 | static int __maybe_unused qcom_iommu_suspend(struct device *dev) | 893 | static int __maybe_unused qcom_iommu_suspend(struct device *dev) |
| 895 | { | 894 | { |
| 896 | struct platform_device *pdev = to_platform_device(dev); | 895 | struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev); |
| 897 | struct qcom_iommu_dev *qcom_iommu = platform_get_drvdata(pdev); | ||
| 898 | 896 | ||
| 899 | qcom_iommu_disable_clocks(qcom_iommu); | 897 | qcom_iommu_disable_clocks(qcom_iommu); |
| 900 | 898 | ||
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c index b62f790ad1ba..89ec24c6952c 100644 --- a/drivers/iommu/tegra-gart.c +++ b/drivers/iommu/tegra-gart.c | |||
| @@ -72,6 +72,8 @@ struct gart_domain { | |||
| 72 | 72 | ||
| 73 | static struct gart_device *gart_handle; /* unique for a system */ | 73 | static struct gart_device *gart_handle; /* unique for a system */ |
| 74 | 74 | ||
| 75 | static bool gart_debug; | ||
| 76 | |||
| 75 | #define GART_PTE(_pfn) \ | 77 | #define GART_PTE(_pfn) \ |
| 76 | (GART_ENTRY_PHYS_ADDR_VALID | ((_pfn) << PAGE_SHIFT)) | 78 | (GART_ENTRY_PHYS_ADDR_VALID | ((_pfn) << PAGE_SHIFT)) |
| 77 | 79 | ||
| @@ -271,6 +273,7 @@ static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova, | |||
| 271 | struct gart_device *gart = gart_domain->gart; | 273 | struct gart_device *gart = gart_domain->gart; |
| 272 | unsigned long flags; | 274 | unsigned long flags; |
| 273 | unsigned long pfn; | 275 | unsigned long pfn; |
| 276 | unsigned long pte; | ||
| 274 | 277 | ||
| 275 | if (!gart_iova_range_valid(gart, iova, bytes)) | 278 | if (!gart_iova_range_valid(gart, iova, bytes)) |
| 276 | return -EINVAL; | 279 | return -EINVAL; |
| @@ -282,6 +285,14 @@ static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova, | |||
| 282 | spin_unlock_irqrestore(&gart->pte_lock, flags); | 285 | spin_unlock_irqrestore(&gart->pte_lock, flags); |
| 283 | return -EINVAL; | 286 | return -EINVAL; |
| 284 | } | 287 | } |
| 288 | if (gart_debug) { | ||
| 289 | pte = gart_read_pte(gart, iova); | ||
| 290 | if (pte & GART_ENTRY_PHYS_ADDR_VALID) { | ||
| 291 | spin_unlock_irqrestore(&gart->pte_lock, flags); | ||
| 292 | dev_err(gart->dev, "Page entry is in-use\n"); | ||
| 293 | return -EBUSY; | ||
| 294 | } | ||
| 295 | } | ||
| 285 | gart_set_pte(gart, iova, GART_PTE(pfn)); | 296 | gart_set_pte(gart, iova, GART_PTE(pfn)); |
| 286 | FLUSH_GART_REGS(gart); | 297 | FLUSH_GART_REGS(gart); |
| 287 | spin_unlock_irqrestore(&gart->pte_lock, flags); | 298 | spin_unlock_irqrestore(&gart->pte_lock, flags); |
| @@ -302,7 +313,7 @@ static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova, | |||
| 302 | gart_set_pte(gart, iova, 0); | 313 | gart_set_pte(gart, iova, 0); |
| 303 | FLUSH_GART_REGS(gart); | 314 | FLUSH_GART_REGS(gart); |
| 304 | spin_unlock_irqrestore(&gart->pte_lock, flags); | 315 | spin_unlock_irqrestore(&gart->pte_lock, flags); |
| 305 | return 0; | 316 | return bytes; |
| 306 | } | 317 | } |
| 307 | 318 | ||
| 308 | static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain, | 319 | static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain, |
| @@ -515,7 +526,9 @@ static void __exit tegra_gart_exit(void) | |||
| 515 | 526 | ||
| 516 | subsys_initcall(tegra_gart_init); | 527 | subsys_initcall(tegra_gart_init); |
| 517 | module_exit(tegra_gart_exit); | 528 | module_exit(tegra_gart_exit); |
| 529 | module_param(gart_debug, bool, 0644); | ||
| 518 | 530 | ||
| 531 | MODULE_PARM_DESC(gart_debug, "Enable GART debugging"); | ||
| 519 | MODULE_DESCRIPTION("IOMMU API for GART in Tegra20"); | 532 | MODULE_DESCRIPTION("IOMMU API for GART in Tegra20"); |
| 520 | MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>"); | 533 | MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>"); |
| 521 | MODULE_ALIAS("platform:tegra-gart"); | 534 | MODULE_ALIAS("platform:tegra-gart"); |
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index ef169d67df92..1df940196ab2 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h | |||
| @@ -121,7 +121,6 @@ | |||
| 121 | #define ecap_srs(e) ((e >> 31) & 0x1) | 121 | #define ecap_srs(e) ((e >> 31) & 0x1) |
| 122 | #define ecap_ers(e) ((e >> 30) & 0x1) | 122 | #define ecap_ers(e) ((e >> 30) & 0x1) |
| 123 | #define ecap_prs(e) ((e >> 29) & 0x1) | 123 | #define ecap_prs(e) ((e >> 29) & 0x1) |
| 124 | #define ecap_broken_pasid(e) ((e >> 28) & 0x1) | ||
| 125 | #define ecap_dis(e) ((e >> 27) & 0x1) | 124 | #define ecap_dis(e) ((e >> 27) & 0x1) |
| 126 | #define ecap_nest(e) ((e >> 26) & 0x1) | 125 | #define ecap_nest(e) ((e >> 26) & 0x1) |
| 127 | #define ecap_mts(e) ((e >> 25) & 0x1) | 126 | #define ecap_mts(e) ((e >> 25) & 0x1) |
