author    Linus Torvalds <torvalds@linux-foundation.org>  2016-02-16 11:04:06 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-02-16 11:04:06 -0500
commit    87bbcfdecc5579042459bb42ede81ed23e4b9a79 (patch)
tree      87dd2ea0914784961e15f7e24ec9d5e9df816c37 /drivers/iommu
parent    e5310a1cb4f56c54488ecaf29b1abf3b790cfddc (diff)
parent    46924008273ed03bd11dbb32136e3da4cfe056e1 (diff)
Merge tag 'for-linus-20160216' of git://git.infradead.org/intel-iommu
Pull IOMMU SVM fixes from David Woodhouse:
 "Minor register size and interrupt acknowledgement fixes which only
  showed up in testing on newer hardware, but mostly a fix to the MM
  refcount handling to prevent a recursive refcount issue when mmap()
  is used on the file descriptor associated with a bound PASID"

* tag 'for-linus-20160216' of git://git.infradead.org/intel-iommu:
  iommu/vt-d: Clear PPR bit to ensure we get more page request interrupts
  iommu/vt-d: Fix 64-bit accesses to 32-bit DMAR_GSTS_REG
  iommu/vt-d: Fix mm refcounting to hold mm_count not mm_users
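The refcounting fix is easiest to see as two counters with different jobs:
mm_users pins the address space (page tables) and reaches zero when the
process exits, while mm_count pins only the struct itself. Holding mm_users
from the PASID binding created the cycle the message describes, because an
mmap() of the device fd makes the mm reference the binding in turn. Below is
a minimal standalone model of that split; the names (mmget_not_zero_model,
the struct mm fields) are illustrative, not the kernel's API.

/* Minimal standalone model (not kernel code) of the two mm counters:
 * "users" pins the address space and hits zero at process exit;
 * "count" pins just the struct, and is what the PASID binding now
 * holds via its mmu_notifier. The fault handler takes a transient
 * users reference with increment-if-not-zero, the pattern the kernel
 * spells atomic_inc_not_zero(&svm->mm->mm_users). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct mm {
	atomic_int users;	/* kernel: mm->mm_users */
	atomic_int count;	/* kernel: mm->mm_count; keeps the struct
				 * readable below even after users is 0 */
};

/* Take a users reference only if the address space is still live. */
static bool mmget_not_zero_model(struct mm *mm)
{
	int old = atomic_load(&mm->users);

	while (old != 0)
		if (atomic_compare_exchange_weak(&mm->users, &old, old + 1))
			return true;
	return false;
}

int main(void)
{
	struct mm mm = { .users = 1, .count = 1 };

	if (mmget_not_zero_model(&mm)) {	/* page-fault path */
		puts("mm live: safe to walk its page tables");
		atomic_fetch_sub(&mm.users, 1);	/* kernel: mmput() */
	}

	atomic_fetch_sub(&mm.users, 1);		/* the process exits */

	if (!mmget_not_zero_model(&mm))		/* a late page request */
		puts("mm defunct: report failure, don't touch page tables");
	return 0;
}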
Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/dmar.c                  2
-rw-r--r--  drivers/iommu/intel-svm.c            37
-rw-r--r--  drivers/iommu/intel_irq_remapping.c   2
3 files changed, 33 insertions(+), 8 deletions(-)
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 62a400c5ba06..fb092f3f11cb 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -1353,7 +1353,7 @@ void dmar_disable_qi(struct intel_iommu *iommu)
 
 	raw_spin_lock_irqsave(&iommu->register_lock, flags);
 
-	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
+	sts = readl(iommu->reg + DMAR_GSTS_REG);
 	if (!(sts & DMA_GSTS_QIES))
 		goto end;
 
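DMAR_GSTS_REG is a 32-bit register (at offset 0x1c in the VT-d programming
interface), so the old 64-bit dmar_readq() also covered the following four
bytes, which belong to the next register. A standalone sketch of the
over-wide read, assuming a little-endian layout; the fake register file and
values are illustrative, not the kernel's readl()/readq() accessors:

/* Standalone sketch (not the kernel's MMIO accessors): why read width
 * matters. A 64-bit read of a 32-bit register also fetches the next
 * four bytes, which on real hardware are a different register. */
#include <inttypes.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t regs[0x28] = { 0 };	/* fake register file */
	uint32_t gsts = 0x04000000;	/* a DMA_GSTS_QIES-like status bit */
	uint32_t next = 0xdeadbeef;	/* the neighbouring register */
	uint32_t sts32;
	uint64_t sts64;

	memcpy(regs + 0x1c, &gsts, 4);	/* DMAR_GSTS_REG lives at 0x1c */
	memcpy(regs + 0x20, &next, 4);

	memcpy(&sts32, regs + 0x1c, 4);	/* readl(): only GSTS */
	memcpy(&sts64, regs + 0x1c, 8);	/* dmar_readq(): GSTS + neighbour */

	printf("32-bit read: %#" PRIx32 "\n", sts32);
	printf("64-bit read: %#" PRIx64 " (high half is the next register)\n",
	       sts64);
	return 0;
}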
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index 50464833d0b8..d9939fa9b588 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -249,12 +249,30 @@ static void intel_flush_pasid_dev(struct intel_svm *svm, struct intel_svm_dev *s
 static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 {
 	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
+	struct intel_svm_dev *sdev;
 
+	/* This might end up being called from exit_mmap(), *before* the page
+	 * tables are cleared. And __mmu_notifier_release() will delete us from
+	 * the list of notifiers so that our invalidate_range() callback doesn't
+	 * get called when the page tables are cleared. So we need to protect
+	 * against hardware accessing those page tables.
+	 *
+	 * We do it by clearing the entry in the PASID table and then flushing
+	 * the IOTLB and the PASID table caches. This might upset hardware;
+	 * perhaps we'll want to point the PASID to a dummy PGD (like the zero
+	 * page) so that we end up taking a fault that the hardware really
+	 * *has* to handle gracefully without affecting other processes.
+	 */
 	svm->iommu->pasid_table[svm->pasid].val = 0;
+	wmb();
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(sdev, &svm->devs, list) {
+		intel_flush_pasid_dev(svm, sdev, svm->pasid);
+		intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
+	}
+	rcu_read_unlock();
 
-	/* There's no need to do any flush because we can't get here if there
-	 * are any devices left anyway. */
-	WARN_ON(!list_empty(&svm->devs));
 }
 
 static const struct mmu_notifier_ops intel_mmuops = {
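The rewritten release path has a strict ordering requirement: the PASID entry
must be torn down first, that store must be ordered before the flushes (the
wmb()), and only then may each device's cached translations be invalidated,
because exit_mmap() will free the page tables as soon as the notifier
returns. Below is a single-threaded C11 model of that ordering; the atomics
and the fake device TLBs are illustrative stand-ins for the PASID table
write, wmb(), and the QI invalidation commands, not the kernel's API.

/* Single-threaded C11 model of the teardown ordering above. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NDEV 2

static _Atomic uint64_t pasid_entry;	/* shared translation entry */
static uint64_t dev_tlb[NDEV];		/* each device's cached copy */

int main(void)
{
	atomic_store(&pasid_entry, 0x1000 | 1);	/* bound: PGD | present */
	for (int i = 0; i < NDEV; i++)
		dev_tlb[i] = 0x1000 | 1;

	/* 1) Make the entry invisible to new walks ... */
	atomic_store_explicit(&pasid_entry, 0, memory_order_relaxed);
	/* 2) ... order that store before the flushes (kernel: wmb()) ... */
	atomic_thread_fence(memory_order_release);
	/* 3) ... then kill every cached copy, so no stale translation
	 *    outlives the page tables that exit_mmap() is about to free. */
	for (int i = 0; i < NDEV; i++) {
		dev_tlb[i] = 0;		/* kernel: intel_flush_pasid_dev()
					 * + intel_flush_svm_range_dev() */
		printf("device %d flushed\n", i);
	}
	return 0;
}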
@@ -379,7 +397,6 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
 				goto out;
 			}
 			iommu->pasid_table[svm->pasid].val = (u64)__pa(mm->pgd) | 1;
-			mm = NULL;
 		} else
 			iommu->pasid_table[svm->pasid].val = (u64)__pa(init_mm.pgd) | 1 | (1ULL << 11);
 		wmb();
@@ -442,11 +459,11 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
 				kfree_rcu(sdev, rcu);
 
 				if (list_empty(&svm->devs)) {
-					mmu_notifier_unregister(&svm->notifier, svm->mm);
 
 					idr_remove(&svm->iommu->pasid_idr, svm->pasid);
 					if (svm->mm)
-						mmput(svm->mm);
+						mmu_notifier_unregister(&svm->notifier, svm->mm);
+
 					/* We mandate that no page faults may be outstanding
 					 * for the PASID when intel_svm_unbind_mm() is called.
 					 * If that is not obeyed, subtle errors will happen.
@@ -507,6 +524,10 @@ static irqreturn_t prq_event_thread(int irq, void *d)
 	struct intel_svm *svm = NULL;
 	int head, tail, handled = 0;
 
+	/* Clear PPR bit before reading head/tail registers, to
+	 * ensure that we get a new interrupt if needed. */
+	writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);
+
 	tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
 	head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
 	while (head != tail) {
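The new writel() is a classic ack-then-scan ordering: clear the
pending-request status bit before sampling the queue, so a request that
races with the handler either gets drained by this pass or re-raises the
interrupt. The sketch below models both orderings single-threadedly;
device_posts_request() stands in for the IOMMU appending to the page request
queue, and the worst case of the fixed order is one spurious interrupt:

/* Single-threaded model of the two orderings in prq_event_thread().
 * Clearing "ppr" stands in for the write-1-to-clear of DMA_PRS_PPR. */
#include <stdbool.h>
#include <stdio.h>

static bool ppr;	/* "pending page request" status bit */
static int head, tail;	/* request queue indices */

static void device_posts_request(void) { tail++; ppr = true; }
static void drain(void) { while (head != tail) head++; }

int main(void)
{
	/* Broken order: sample/drain first, ack afterwards. A request
	 * landing in between is left queued with PPR clear, so no new
	 * interrupt arrives and the request is stuck. */
	device_posts_request();
	drain();
	device_posts_request();	/* races with the handler */
	ppr = false;
	printf("broken: %d queued, ppr=%d -> no further interrupt\n",
	       tail - head, ppr);

	/* Fixed order: ack first, then sample. The racing request either
	 * gets drained by this pass or re-raises PPR, so the IOMMU will
	 * interrupt again. */
	head = tail = 0;
	device_posts_request();
	ppr = false;	/* kernel: writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG) */
	device_posts_request();
	drain();
	printf("fixed:  %d queued, ppr=%d -> interrupt re-fires if needed\n",
	       tail - head, ppr);
	return 0;
}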
@@ -551,6 +572,9 @@ static irqreturn_t prq_event_thread(int irq, void *d)
 		 * any faults on kernel addresses. */
 		if (!svm->mm)
 			goto bad_req;
+		/* If the mm is already defunct, don't handle faults. */
+		if (!atomic_inc_not_zero(&svm->mm->mm_users))
+			goto bad_req;
 		down_read(&svm->mm->mmap_sem);
 		vma = find_extend_vma(svm->mm, address);
 		if (!vma || address < vma->vm_start)
@@ -567,6 +591,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
 		result = QI_RESP_SUCCESS;
 	invalid:
 		up_read(&svm->mm->mmap_sem);
+		mmput(svm->mm);
 	bad_req:
 		/* Accounting for major/minor faults? */
 		rcu_read_lock();
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index c12ba4516df2..ac596928f6b4 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -629,7 +629,7 @@ static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
 
 	raw_spin_lock_irqsave(&iommu->register_lock, flags);
 
-	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
+	sts = readl(iommu->reg + DMAR_GSTS_REG);
 	if (!(sts & DMA_GSTS_IRES))
 		goto end;
 