aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/iommu
diff options
context:
space:
mode:
authorSuravee Suthikulpanit <suravee.suthikulpanit@amd.com>2013-04-22 17:32:34 -0400
committerJoerg Roedel <joro@8bytes.org>2013-04-23 08:34:26 -0400
commit3f398bc7762adcd860bd2acce18465a106f47325 (patch)
tree120d9bde4c592f3561d1f218fbea00b11b631393 /drivers/iommu
parentd3263bc29706e42f74d8800807c2dedf320d77f1 (diff)
iommu/AMD: Per-thread IOMMU Interrupt Handling
In the current interrupt handling scheme, there are as many threads as the number of IOMMUs. Each thread is created and assigned to an IOMMU at the time of registering interrupt handlers (request_threaded_irq). When an IOMMU HW generates an interrupt, the irq handler (top half) wakes up the corresponding thread to process event and PPR logs of all IOMMUs starting from the 1st IOMMU. In a system with multiple IOMMUs, this handling scheme complicates the synchronization of the IOMMU data structures and status registers, as there could be multiple threads competing for the same IOMMU while another IOMMU could be left unhandled. To simplify, this patch proposes a different interrupt handling scheme in which each thread only manages interrupts of the corresponding IOMMU. This can be achieved by passing the struct amd_iommu when registering the interrupt handlers. This structure is unique for each IOMMU and can be used by the bottom-half thread to identify the IOMMU to be handled, instead of calling for_each_iommu. Besides that, this also eliminates the need to lock the IOMMU for processing event and PPR logs. Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com> Signed-off-by: Joerg Roedel <joro@8bytes.org>
Diffstat (limited to 'drivers/iommu')
-rw-r--r--drivers/iommu/amd_iommu.c82
-rw-r--r--drivers/iommu/amd_iommu_init.c2
2 files changed, 34 insertions, 50 deletions
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 27792f8c429d..c6f3c7e04684 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -700,22 +700,7 @@ retry:
700 700
701static void iommu_poll_events(struct amd_iommu *iommu) 701static void iommu_poll_events(struct amd_iommu *iommu)
702{ 702{
703 u32 head, tail, status; 703 u32 head, tail;
704 unsigned long flags;
705
706 spin_lock_irqsave(&iommu->lock, flags);
707
708 /* enable event interrupts again */
709 do {
710 /*
711 * Workaround for Erratum ERBT1312
712 * Clearing the EVT_INT bit may race in the hardware, so read
713 * it again and make sure it was really cleared
714 */
715 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
716 writel(MMIO_STATUS_EVT_INT_MASK,
717 iommu->mmio_base + MMIO_STATUS_OFFSET);
718 } while (status & MMIO_STATUS_EVT_INT_MASK);
719 704
720 head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); 705 head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
721 tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); 706 tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
@@ -726,8 +711,6 @@ static void iommu_poll_events(struct amd_iommu *iommu)
726 } 711 }
727 712
728 writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); 713 writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
729
730 spin_unlock_irqrestore(&iommu->lock, flags);
731} 714}
732 715
733static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw) 716static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
@@ -752,26 +735,11 @@ static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
752 735
753static void iommu_poll_ppr_log(struct amd_iommu *iommu) 736static void iommu_poll_ppr_log(struct amd_iommu *iommu)
754{ 737{
755 unsigned long flags; 738 u32 head, tail;
756 u32 head, tail, status;
757 739
758 if (iommu->ppr_log == NULL) 740 if (iommu->ppr_log == NULL)
759 return; 741 return;
760 742
761 spin_lock_irqsave(&iommu->lock, flags);
762
763 /* enable ppr interrupts again */
764 do {
765 /*
766 * Workaround for Erratum ERBT1312
767 * Clearing the PPR_INT bit may race in the hardware, so read
768 * it again and make sure it was really cleared
769 */
770 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
771 writel(MMIO_STATUS_PPR_INT_MASK,
772 iommu->mmio_base + MMIO_STATUS_OFFSET);
773 } while (status & MMIO_STATUS_PPR_INT_MASK);
774
775 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); 743 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
776 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); 744 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
777 745
@@ -807,34 +775,50 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu)
807 head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE; 775 head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
808 writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); 776 writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
809 777
810 /*
811 * Release iommu->lock because ppr-handling might need to
812 * re-acquire it
813 */
814 spin_unlock_irqrestore(&iommu->lock, flags);
815
816 /* Handle PPR entry */ 778 /* Handle PPR entry */
817 iommu_handle_ppr_entry(iommu, entry); 779 iommu_handle_ppr_entry(iommu, entry);
818 780
819 spin_lock_irqsave(&iommu->lock, flags);
820
821 /* Refresh ring-buffer information */ 781 /* Refresh ring-buffer information */
822 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); 782 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
823 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); 783 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
824 } 784 }
825
826 spin_unlock_irqrestore(&iommu->lock, flags);
827} 785}
828 786
829irqreturn_t amd_iommu_int_thread(int irq, void *data) 787irqreturn_t amd_iommu_int_thread(int irq, void *data)
830{ 788{
831 struct amd_iommu *iommu; 789 struct amd_iommu *iommu = (struct amd_iommu *) data;
790 u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
832 791
833 for_each_iommu(iommu) { 792 while (status & (MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK)) {
834 iommu_poll_events(iommu); 793 /* Enable EVT and PPR interrupts again */
835 iommu_poll_ppr_log(iommu); 794 writel((MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK),
836 } 795 iommu->mmio_base + MMIO_STATUS_OFFSET);
837 796
797 if (status & MMIO_STATUS_EVT_INT_MASK) {
798 pr_devel("AMD-Vi: Processing IOMMU Event Log\n");
799 iommu_poll_events(iommu);
800 }
801
802 if (status & MMIO_STATUS_PPR_INT_MASK) {
803 pr_devel("AMD-Vi: Processing IOMMU PPR Log\n");
804 iommu_poll_ppr_log(iommu);
805 }
806
807 /*
808 * Hardware bug: ERBT1312
809 * When re-enabling interrupt (by writing 1
810 * to clear the bit), the hardware might also try to set
811 * the interrupt bit in the event status register.
812 * In this scenario, the bit will be set, and disable
813 * subsequent interrupts.
814 *
815 * Workaround: The IOMMU driver should read back the
816 * status register and check if the interrupt bits are cleared.
817 * If not, driver will need to go through the interrupt handler
818 * again and re-clear the bits
819 */
820 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
821 }
838 return IRQ_HANDLED; 822 return IRQ_HANDLED;
839} 823}
840 824
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 976794166481..3d3d6cd52d47 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1324,7 +1324,7 @@ static int iommu_setup_msi(struct amd_iommu *iommu)
1324 amd_iommu_int_handler, 1324 amd_iommu_int_handler,
1325 amd_iommu_int_thread, 1325 amd_iommu_int_thread,
1326 0, "AMD-Vi", 1326 0, "AMD-Vi",
1327 iommu->dev); 1327 iommu);
1328 1328
1329 if (r) { 1329 if (r) {
1330 pci_disable_msi(iommu->dev); 1330 pci_disable_msi(iommu->dev);