author     Joerg Roedel <joerg.roedel@amd.com>  2012-06-01 09:20:23 -0400
committer  Joerg Roedel <joerg.roedel@amd.com>  2012-06-04 06:47:44 -0400
commit     eee53537c476c947bf7faa1c916d2f5a0ae8ec93 (patch)
tree       0ca87099455da1cd0bf48affee27516a4bd1fa38 /drivers/iommu
parent     c1bf94ec1e12d76838ad485158aecf208ebd8fb9 (diff)
iommu/amd: Fix deadlock in ppr-handling error path
In the error path of the ppr_notifier it can happen that the
iommu->lock is taken recursively. This patch fixes the
problem by releasing the iommu->lock before any notifier is
invoked. This also requires moving the erratum workaround
for the ppr-log (the interrupt may arrive faster than the
data in the log) up one function.
Cc: stable@vger.kernel.org # v3.3, v3.4
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
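
To see the shape of the bug and of the fix outside the kernel, here is a
minimal, self-contained userspace sketch. It is not the driver code: the
names demo_lock, log_buf, handle_entry, and the poll_log_* functions are all
hypothetical stand-ins, with a pthread mutex playing the role of iommu->lock
and handle_entry() playing the role of the ppr_notifier chain whose error
path takes the lock again.

/*
 * Minimal sketch of the deadlock this patch fixes -- hypothetical
 * names throughout.  Build with: cc -pthread demo.c
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t demo_lock;                /* plays iommu->lock */
static unsigned long log_buf[2] = { 1, 2 };      /* plays the ppr log */

/* Plays the notifier chain: its error path needs the lock again. */
static void handle_entry(const unsigned long *entry)
{
	int err;

	(void)entry;
	err = pthread_mutex_lock(&demo_lock);    /* the recursive take */
	if (err)
		printf("deadlock detected: %s\n", strerror(err));
	else
		pthread_mutex_unlock(&demo_lock);
}

/* Buggy shape: the callback runs with the lock still held. */
static void poll_log_buggy(void)
{
	pthread_mutex_lock(&demo_lock);
	handle_entry(log_buf);                   /* deadlocks here */
	pthread_mutex_unlock(&demo_lock);
}

/*
 * Fixed shape, as in the patch: copy the entry out of the shared
 * buffer, drop the lock across the callback, re-take it afterwards.
 */
static void poll_log_fixed(void)
{
	unsigned long entry[2];

	pthread_mutex_lock(&demo_lock);
	entry[0] = log_buf[0];                   /* copy under the lock */
	entry[1] = log_buf[1];
	log_buf[0] = log_buf[1] = 0;             /* clear the slot */
	pthread_mutex_unlock(&demo_lock);

	handle_entry(entry);                     /* free to lock now */

	pthread_mutex_lock(&demo_lock);          /* refresh state here */
	pthread_mutex_unlock(&demo_lock);
}

int main(void)
{
	pthread_mutexattr_t attr;

	/* Error-checking type so the recursive lock returns EDEADLK
	 * instead of hanging, purely so the demo terminates. */
	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
	pthread_mutex_init(&demo_lock, &attr);

	poll_log_buggy();                        /* prints the deadlock */
	poll_log_fixed();                        /* completes normally */
	return 0;
}

With the error-checking mutex the buggy version reports EDEADLK instead of
hanging; a kernel spinlock has no such check, so the recursive
spin_lock_irqsave() ordinarily just spins forever.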
Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/amd_iommu.c | 71
1 file changed, 44 insertions(+), 27 deletions(-)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index d90a421e9cac..a2e418cba0ff 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -547,26 +547,12 @@ static void iommu_poll_events(struct amd_iommu *iommu)
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
-static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head)
+static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
 {
 	struct amd_iommu_fault fault;
-	volatile u64 *raw;
-	int i;
 
 	INC_STATS_COUNTER(pri_requests);
 
-	raw = (u64 *)(iommu->ppr_log + head);
-
-	/*
-	 * Hardware bug: Interrupt may arrive before the entry is written to
-	 * memory. If this happens we need to wait for the entry to arrive.
-	 */
-	for (i = 0; i < LOOP_TIMEOUT; ++i) {
-		if (PPR_REQ_TYPE(raw[0]) != 0)
-			break;
-		udelay(1);
-	}
-
 	if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
 		pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n");
 		return;
@@ -578,12 +564,6 @@ static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head)
 	fault.tag       = PPR_TAG(raw[0]);
 	fault.flags     = PPR_FLAGS(raw[0]);
 
-	/*
-	 * To detect the hardware bug we need to clear the entry
-	 * to back to zero.
-	 */
-	raw[0] = raw[1] = 0;
-
 	atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
 }
 
@@ -595,25 +575,62 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu)
 	if (iommu->ppr_log == NULL)
 		return;
 
+	/* enable ppr interrupts again */
+	writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
+
 	spin_lock_irqsave(&iommu->lock, flags);
 
 	head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
 	tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
 
 	while (head != tail) {
+		volatile u64 *raw;
+		u64 entry[2];
+		int i;
 
-		/* Handle PPR entry */
-		iommu_handle_ppr_entry(iommu, head);
+		raw = (u64 *)(iommu->ppr_log + head);
+
+		/*
+		 * Hardware bug: Interrupt may arrive before the entry is
+		 * written to memory. If this happens we need to wait for the
+		 * entry to arrive.
+		 */
+		for (i = 0; i < LOOP_TIMEOUT; ++i) {
+			if (PPR_REQ_TYPE(raw[0]) != 0)
+				break;
+			udelay(1);
+		}
+
+		/* Avoid memcpy function-call overhead */
+		entry[0] = raw[0];
+		entry[1] = raw[1];
 
-		/* Update and refresh ring-buffer state*/
+		/*
+		 * To detect the hardware bug we need to clear the entry
+		 * back to zero.
+		 */
+		raw[0] = raw[1] = 0UL;
+
+		/* Update head pointer of hardware ring-buffer */
 		head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
 		writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
+
+		/*
+		 * Release iommu->lock because ppr-handling might need to
+		 * re-aquire it
+		 */
+		spin_unlock_irqrestore(&iommu->lock, flags);
+
+		/* Handle PPR entry */
+		iommu_handle_ppr_entry(iommu, entry);
+
+		spin_lock_irqsave(&iommu->lock, flags);
+
+		/* Refresh ring-buffer information */
+		head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
 		tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
 	}
 
-	/* enable ppr interrupts again */
-	writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
-
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
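
Two details of the reworked loop are worth noting. The head pointer is
written back and head/tail are re-read under the lock on every iteration,
since new log entries may have been queued while the lock was dropped for
the notifier call; and the write that re-enables ppr interrupts moves from
the end of the function to the top, presumably so that an interrupt arriving
while the log is being drained re-triggers polling instead of being lost.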