author	Will Deacon <will.deacon@arm.com>	2016-08-05 14:49:45 -0400
committer	Will Deacon <will.deacon@arm.com>	2016-08-19 04:48:01 -0400
commit	3714ce1d6655098ee69ede632883e5874d67e4ab (patch)
tree	7245fb629d464eb2da13da01ae556d8be7078887 /drivers/iommu/arm-smmu.c
parent	aea2037e0d3e23c3be1498feae29f71ca997d9e6 (diff)
iommu/arm-smmu: Disable stalling faults for all endpoints
Enabling stalling faults can result in hardware deadlock on poorly designed systems, particularly those with a PCI root complex upstream of the SMMU.

Although it's not really Linux's job to save hardware integrators from their own misfortune, it *is* our job to stop userspace (e.g. VFIO clients) from hosing the system for everybody else, even if they might already be required to have elevated privileges.

Given that the fault handling code currently executes entirely in IRQ context, there is nothing that can sensibly be done to recover from things like page faults anyway, so let's rip this code out for now and avoid the potential for deadlock.

Cc: <stable@vger.kernel.org>
Fixes: 48ec83bcbcf5 ("iommu/arm-smmu: Add initial driver support for ARM SMMUv3 devices")
Reported-by: Matt Evans <matt.evans@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
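For illustration, here is a minimal, self-contained C model of the two policies the patch switches between; every name in it is hypothetical and none of it is arm-smmu.c code. Under the old stall model, software must decide whether a held transaction is retried or terminated; under the new terminate model, the hardware aborts the access and the IRQ handler only has to log it.

/*
 * Hypothetical, self-contained model of the two fault-handling policies.
 * None of these names come from arm-smmu.c; this is a schematic sketch only.
 */
#include <stdbool.h>
#include <stdio.h>

struct ctx_fault {
	unsigned long iova;	/* faulting IO virtual address */
	bool sw_handled;	/* did a software fault handler resolve it? */
};

/* Old stall model: the faulting transaction is held until software resumes it. */
static void stall_model(const struct ctx_fault *f)
{
	/*
	 * While the transaction is stalled, the endpoint (and anything behind
	 * a PCI root complex) may be unable to make forward progress -- the
	 * deadlock this commit is avoiding.
	 */
	if (f->sw_handled)
		printf("stall model: retry transaction at iova=0x%lx\n", f->iova);
	else
		printf("stall model: terminate transaction at iova=0x%lx\n", f->iova);
}

/* New terminate model: hardware aborts the access; the IRQ handler only logs. */
static void terminate_model(const struct ctx_fault *f)
{
	printf("terminate model: unhandled fault at iova=0x%lx, access aborted\n",
	       f->iova);
}

int main(void)
{
	struct ctx_fault f = { .iova = 0x1000, .sw_handled = false };

	stall_model(&f);
	terminate_model(&f);
	return 0;
}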
Diffstat (limited to 'drivers/iommu/arm-smmu.c')
-rw-r--r--	drivers/iommu/arm-smmu.c	34
1 file changed, 7 insertions(+), 27 deletions(-)
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 4f49fe29f202..2db74ebc3240 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -686,8 +686,7 @@ static struct iommu_gather_ops arm_smmu_gather_ops = {
 
 static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 {
-	int flags, ret;
-	u32 fsr, fsynr, resume;
+	u32 fsr, fsynr;
 	unsigned long iova;
 	struct iommu_domain *domain = dev;
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
@@ -701,34 +700,15 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 	if (!(fsr & FSR_FAULT))
 		return IRQ_NONE;
 
-	if (fsr & FSR_IGN)
-		dev_err_ratelimited(smmu->dev,
-				    "Unexpected context fault (fsr 0x%x)\n",
-				    fsr);
-
 	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
-	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
-
 	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
-	if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
-		ret = IRQ_HANDLED;
-		resume = RESUME_RETRY;
-	} else {
-		dev_err_ratelimited(smmu->dev,
-			"Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
-			iova, fsynr, cfg->cbndx);
-		ret = IRQ_NONE;
-		resume = RESUME_TERMINATE;
-	}
-
-	/* Clear the faulting FSR */
-	writel(fsr, cb_base + ARM_SMMU_CB_FSR);
 
-	/* Retry or terminate any stalled transactions */
-	if (fsr & FSR_SS)
-		writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
+	dev_err_ratelimited(smmu->dev,
+			    "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
+			    fsr, iova, fsynr, cfg->cbndx);
 
-	return ret;
+	writel(fsr, cb_base + ARM_SMMU_CB_FSR);
+	return IRQ_HANDLED;
 }
 
 static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
@@ -837,7 +817,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
 	}
 
 	/* SCTLR */
-	reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
+	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
 	if (stage1)
 		reg |= SCTLR_S1_ASIDPNE;
 #ifdef __BIG_ENDIAN