diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-06-06 12:47:57 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-06-06 12:47:57 -0400 |
| commit | 9e68447f5b38d6092a6feeeb713a6564e963e68b (patch) | |
| tree | ff38db3b4fd92c6c747718b1c0c79ccf2f8aa479 | |
| parent | eea5b5510fc5545d15b69da8e485a7424ae388cf (diff) | |
| parent | eee53537c476c947bf7faa1c916d2f5a0ae8ec93 (diff) | |
Merge tag 'iommu-fixes-3.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull IOMMU fixes from Joerg Roedel:
"Two patches are in here which fix AMD IOMMU specific issues. One
patch fixes a long-standing warning on resume because the
amd_iommu_resume function enabled interrupts. The other patch fixes a
deadlock in an error-path of the page-fault request handling code of
the IOMMU driver."
* tag 'iommu-fixes-3.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
iommu/amd: Fix deadlock in ppr-handling error path
iommu/amd: Cache pdev pointer to root-bridge
| -rw-r--r-- | drivers/iommu/amd_iommu.c | 71 | ||||
| -rw-r--r-- | drivers/iommu/amd_iommu_init.c | 13 | ||||
| -rw-r--r-- | drivers/iommu/amd_iommu_types.h | 3 |
3 files changed, 52 insertions, 35 deletions
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index d90a421e9cac..a2e418cba0ff 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c | |||
| @@ -547,26 +547,12 @@ static void iommu_poll_events(struct amd_iommu *iommu) | |||
| 547 | spin_unlock_irqrestore(&iommu->lock, flags); | 547 | spin_unlock_irqrestore(&iommu->lock, flags); |
| 548 | } | 548 | } |
| 549 | 549 | ||
| 550 | static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head) | 550 | static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw) |
| 551 | { | 551 | { |
| 552 | struct amd_iommu_fault fault; | 552 | struct amd_iommu_fault fault; |
| 553 | volatile u64 *raw; | ||
| 554 | int i; | ||
| 555 | 553 | ||
| 556 | INC_STATS_COUNTER(pri_requests); | 554 | INC_STATS_COUNTER(pri_requests); |
| 557 | 555 | ||
| 558 | raw = (u64 *)(iommu->ppr_log + head); | ||
| 559 | |||
| 560 | /* | ||
| 561 | * Hardware bug: Interrupt may arrive before the entry is written to | ||
| 562 | * memory. If this happens we need to wait for the entry to arrive. | ||
| 563 | */ | ||
| 564 | for (i = 0; i < LOOP_TIMEOUT; ++i) { | ||
| 565 | if (PPR_REQ_TYPE(raw[0]) != 0) | ||
| 566 | break; | ||
| 567 | udelay(1); | ||
| 568 | } | ||
| 569 | |||
| 570 | if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) { | 556 | if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) { |
| 571 | pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n"); | 557 | pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n"); |
| 572 | return; | 558 | return; |
| @@ -578,12 +564,6 @@ static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head) | |||
| 578 | fault.tag = PPR_TAG(raw[0]); | 564 | fault.tag = PPR_TAG(raw[0]); |
| 579 | fault.flags = PPR_FLAGS(raw[0]); | 565 | fault.flags = PPR_FLAGS(raw[0]); |
| 580 | 566 | ||
| 581 | /* | ||
| 582 | * To detect the hardware bug we need to clear the entry | ||
| 583 | * to back to zero. | ||
| 584 | */ | ||
| 585 | raw[0] = raw[1] = 0; | ||
| 586 | |||
| 587 | atomic_notifier_call_chain(&ppr_notifier, 0, &fault); | 567 | atomic_notifier_call_chain(&ppr_notifier, 0, &fault); |
| 588 | } | 568 | } |
| 589 | 569 | ||
| @@ -595,25 +575,62 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu) | |||
| 595 | if (iommu->ppr_log == NULL) | 575 | if (iommu->ppr_log == NULL) |
| 596 | return; | 576 | return; |
| 597 | 577 | ||
| 578 | /* enable ppr interrupts again */ | ||
| 579 | writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET); | ||
| 580 | |||
| 598 | spin_lock_irqsave(&iommu->lock, flags); | 581 | spin_lock_irqsave(&iommu->lock, flags); |
| 599 | 582 | ||
| 600 | head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); | 583 | head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); |
| 601 | tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); | 584 | tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); |
| 602 | 585 | ||
| 603 | while (head != tail) { | 586 | while (head != tail) { |
| 587 | volatile u64 *raw; | ||
| 588 | u64 entry[2]; | ||
| 589 | int i; | ||
| 604 | 590 | ||
| 605 | /* Handle PPR entry */ | 591 | raw = (u64 *)(iommu->ppr_log + head); |
| 606 | iommu_handle_ppr_entry(iommu, head); | 592 | |
| 593 | /* | ||
| 594 | * Hardware bug: Interrupt may arrive before the entry is | ||
| 595 | * written to memory. If this happens we need to wait for the | ||
| 596 | * entry to arrive. | ||
| 597 | */ | ||
| 598 | for (i = 0; i < LOOP_TIMEOUT; ++i) { | ||
| 599 | if (PPR_REQ_TYPE(raw[0]) != 0) | ||
| 600 | break; | ||
| 601 | udelay(1); | ||
| 602 | } | ||
| 603 | |||
| 604 | /* Avoid memcpy function-call overhead */ | ||
| 605 | entry[0] = raw[0]; | ||
| 606 | entry[1] = raw[1]; | ||
| 607 | 607 | ||
| 608 | /* Update and refresh ring-buffer state*/ | 608 | /* |
| 609 | * To detect the hardware bug we need to clear the entry | ||
| 610 | * back to zero. | ||
| 611 | */ | ||
| 612 | raw[0] = raw[1] = 0UL; | ||
| 613 | |||
| 614 | /* Update head pointer of hardware ring-buffer */ | ||
| 609 | head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE; | 615 | head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE; |
| 610 | writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); | 616 | writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); |
| 617 | |||
| 618 | /* | ||
| 619 | * Release iommu->lock because ppr-handling might need to | ||
| 620 | * re-aquire it | ||
| 621 | */ | ||
| 622 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
| 623 | |||
| 624 | /* Handle PPR entry */ | ||
| 625 | iommu_handle_ppr_entry(iommu, entry); | ||
| 626 | |||
| 627 | spin_lock_irqsave(&iommu->lock, flags); | ||
| 628 | |||
| 629 | /* Refresh ring-buffer information */ | ||
| 630 | head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); | ||
| 611 | tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); | 631 | tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); |
| 612 | } | 632 | } |
| 613 | 633 | ||
| 614 | /* enable ppr interrupts again */ | ||
| 615 | writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET); | ||
| 616 | |||
| 617 | spin_unlock_irqrestore(&iommu->lock, flags); | 634 | spin_unlock_irqrestore(&iommu->lock, flags); |
| 618 | } | 635 | } |
| 619 | 636 | ||
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index c56790375e0f..542024ba6dba 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c | |||
| @@ -1029,6 +1029,9 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) | |||
| 1029 | if (!iommu->dev) | 1029 | if (!iommu->dev) |
| 1030 | return 1; | 1030 | return 1; |
| 1031 | 1031 | ||
| 1032 | iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number, | ||
| 1033 | PCI_DEVFN(0, 0)); | ||
| 1034 | |||
| 1032 | iommu->cap_ptr = h->cap_ptr; | 1035 | iommu->cap_ptr = h->cap_ptr; |
| 1033 | iommu->pci_seg = h->pci_seg; | 1036 | iommu->pci_seg = h->pci_seg; |
| 1034 | iommu->mmio_phys = h->mmio_phys; | 1037 | iommu->mmio_phys = h->mmio_phys; |
| @@ -1323,20 +1326,16 @@ static void iommu_apply_resume_quirks(struct amd_iommu *iommu) | |||
| 1323 | { | 1326 | { |
| 1324 | int i, j; | 1327 | int i, j; |
| 1325 | u32 ioc_feature_control; | 1328 | u32 ioc_feature_control; |
| 1326 | struct pci_dev *pdev = NULL; | 1329 | struct pci_dev *pdev = iommu->root_pdev; |
| 1327 | 1330 | ||
| 1328 | /* RD890 BIOSes may not have completely reconfigured the iommu */ | 1331 | /* RD890 BIOSes may not have completely reconfigured the iommu */ |
| 1329 | if (!is_rd890_iommu(iommu->dev)) | 1332 | if (!is_rd890_iommu(iommu->dev) || !pdev) |
| 1330 | return; | 1333 | return; |
| 1331 | 1334 | ||
| 1332 | /* | 1335 | /* |
| 1333 | * First, we need to ensure that the iommu is enabled. This is | 1336 | * First, we need to ensure that the iommu is enabled. This is |
| 1334 | * controlled by a register in the northbridge | 1337 | * controlled by a register in the northbridge |
| 1335 | */ | 1338 | */ |
| 1336 | pdev = pci_get_bus_and_slot(iommu->dev->bus->number, PCI_DEVFN(0, 0)); | ||
| 1337 | |||
| 1338 | if (!pdev) | ||
| 1339 | return; | ||
| 1340 | 1339 | ||
| 1341 | /* Select Northbridge indirect register 0x75 and enable writing */ | 1340 | /* Select Northbridge indirect register 0x75 and enable writing */ |
| 1342 | pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7)); | 1341 | pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7)); |
| @@ -1346,8 +1345,6 @@ static void iommu_apply_resume_quirks(struct amd_iommu *iommu) | |||
| 1346 | if (!(ioc_feature_control & 0x1)) | 1345 | if (!(ioc_feature_control & 0x1)) |
| 1347 | pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1); | 1346 | pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1); |
| 1348 | 1347 | ||
| 1349 | pci_dev_put(pdev); | ||
| 1350 | |||
| 1351 | /* Restore the iommu BAR */ | 1348 | /* Restore the iommu BAR */ |
| 1352 | pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, | 1349 | pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, |
| 1353 | iommu->stored_addr_lo); | 1350 | iommu->stored_addr_lo); |
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index 2452f3b71736..24355559a2ad 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h | |||
| @@ -481,6 +481,9 @@ struct amd_iommu { | |||
| 481 | /* Pointer to PCI device of this IOMMU */ | 481 | /* Pointer to PCI device of this IOMMU */ |
| 482 | struct pci_dev *dev; | 482 | struct pci_dev *dev; |
| 483 | 483 | ||
| 484 | /* Cache pdev to root device for resume quirks */ | ||
| 485 | struct pci_dev *root_pdev; | ||
| 486 | |||
| 484 | /* physical address of MMIO space */ | 487 | /* physical address of MMIO space */ |
| 485 | u64 mmio_phys; | 488 | u64 mmio_phys; |
| 486 | /* virtual address of MMIO space */ | 489 | /* virtual address of MMIO space */ |
