author	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-27 22:00:45 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-27 22:00:45 -0400
commit	3e7b256cba330240c8fcde3d01c65bfda12a2847 (patch)
tree	fa299b2447b399061ac2bd34737b0c455de7c200
parent	d1fc98ba961db83293f804a1037e905c03b301cf (diff)
parent	d73a6d722a675dbba8f6b52c964a7076a24a12c1 (diff)
Merge tag 'iommu-fixes-v3.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull IOMMU fixes from Joerg Roedel:

 - fix VT-d regression with handling multiple RMRR entries per device

 - fix a small race that was left in the mmu_notifier handling in the
   AMD IOMMUv2 driver

* tag 'iommu-fixes-v3.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu/amd: Fix small race between invalidate_range_end/start
  iommu/vt-d: fix bug in handling multiple RMRRs for the same PCI device
-rw-r--r--	drivers/iommu/amd_iommu_v2.c	18
-rw-r--r--	drivers/iommu/intel-iommu.c	9
2 files changed, 16 insertions(+), 11 deletions(-)
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index d4daa05efe60..499b4366a98d 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -45,7 +45,7 @@ struct pri_queue {
 struct pasid_state {
 	struct list_head list;			/* For global state-list */
 	atomic_t count;				/* Reference count */
-	atomic_t mmu_notifier_count;		/* Counting nested mmu_notifier
+	unsigned mmu_notifier_count;		/* Counting nested mmu_notifier
 						   calls */
 	struct task_struct *task;		/* Task bound to this PASID */
 	struct mm_struct *mm;			/* mm_struct for the faults */
@@ -53,7 +53,8 @@ struct pasid_state {
 	struct pri_queue pri[PRI_QUEUE_SIZE];	/* PRI tag states */
 	struct device_state *device_state;	/* Link to our device_state */
 	int pasid;				/* PASID index */
-	spinlock_t lock;			/* Protect pri_queues */
+	spinlock_t lock;			/* Protect pri_queues and
+						   mmu_notifer_count */
 	wait_queue_head_t wq;			/* To wait for count == 0 */
 };
 
@@ -431,15 +432,19 @@ static void mn_invalidate_range_start(struct mmu_notifier *mn,
 {
 	struct pasid_state *pasid_state;
 	struct device_state *dev_state;
+	unsigned long flags;
 
 	pasid_state = mn_to_state(mn);
 	dev_state   = pasid_state->device_state;
 
-	if (atomic_add_return(1, &pasid_state->mmu_notifier_count) == 1) {
+	spin_lock_irqsave(&pasid_state->lock, flags);
+	if (pasid_state->mmu_notifier_count == 0) {
 		amd_iommu_domain_set_gcr3(dev_state->domain,
 					  pasid_state->pasid,
 					  __pa(empty_page_table));
 	}
+	pasid_state->mmu_notifier_count += 1;
+	spin_unlock_irqrestore(&pasid_state->lock, flags);
 }
 
 static void mn_invalidate_range_end(struct mmu_notifier *mn,
@@ -448,15 +453,19 @@ static void mn_invalidate_range_end(struct mmu_notifier *mn,
 {
 	struct pasid_state *pasid_state;
 	struct device_state *dev_state;
+	unsigned long flags;
 
 	pasid_state = mn_to_state(mn);
 	dev_state   = pasid_state->device_state;
 
-	if (atomic_dec_and_test(&pasid_state->mmu_notifier_count)) {
+	spin_lock_irqsave(&pasid_state->lock, flags);
+	pasid_state->mmu_notifier_count -= 1;
+	if (pasid_state->mmu_notifier_count == 0) {
 		amd_iommu_domain_set_gcr3(dev_state->domain,
 					  pasid_state->pasid,
 					  __pa(pasid_state->mm->pgd));
 	}
+	spin_unlock_irqrestore(&pasid_state->lock, flags);
 }
 
 static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
@@ -650,7 +659,6 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
 		goto out;
 
 	atomic_set(&pasid_state->count, 1);
-	atomic_set(&pasid_state->mmu_notifier_count, 0);
 	init_waitqueue_head(&pasid_state->wq);
 	spin_lock_init(&pasid_state->lock);
 
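The point of the AMD change above is that the nesting counter and the gcr3 switch are now checked and updated under pasid_state->lock, instead of relying on two separate atomic operations whose check-then-act windows could interleave across an overlapping invalidate_range_end/start pair. Below is a minimal user-space sketch of that pattern, with a pthread mutex standing in for the kernel spinlock; the names and the main() driver are illustrative, not taken from the driver.

/*
 * Hedged sketch (not kernel code): lock-protected nesting counter for
 * overlapping invalidate_range_start/end calls.  Build with -pthread.
 */
#include <pthread.h>
#include <stdio.h>

struct pasid_like_state {
	pthread_mutex_t lock;        /* stands in for pasid_state->lock       */
	unsigned mmu_notifier_count; /* nesting depth of active invalidations */
	int using_empty_table;       /* stands in for the gcr3 table switch   */
};

static void range_start(struct pasid_like_state *s)
{
	pthread_mutex_lock(&s->lock);
	if (s->mmu_notifier_count == 0)
		s->using_empty_table = 1;	/* first start: park on empty table */
	s->mmu_notifier_count += 1;
	pthread_mutex_unlock(&s->lock);
}

static void range_end(struct pasid_like_state *s)
{
	pthread_mutex_lock(&s->lock);
	s->mmu_notifier_count -= 1;
	if (s->mmu_notifier_count == 0)
		s->using_empty_table = 0;	/* last end: restore the real table */
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct pasid_like_state s = { .mmu_notifier_count = 0, .using_empty_table = 0 };

	pthread_mutex_init(&s.lock, NULL);

	range_start(&s);	/* outer invalidation begins  */
	range_start(&s);	/* nested invalidation begins */
	range_end(&s);		/* still nested: empty table stays installed */
	printf("empty table in use: %d\n", s.using_empty_table);	/* prints 1 */
	range_end(&s);
	printf("empty table in use: %d\n", s.using_empty_table);	/* prints 0 */

	pthread_mutex_destroy(&s.lock);
	return 0;
}

Because the counter check and the table switch sit inside one critical section, two threads can no longer observe a stale count and leave the wrong table installed, which is the window the original atomic_add_return/atomic_dec_and_test version left open.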
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 6bb32773c3ac..51b6b77dc3e5 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3816,14 +3816,11 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
 					((void *)rmrr) + rmrr->header.length,
 					rmrr->segment, rmrru->devices,
 					rmrru->devices_cnt);
-			if (ret > 0)
-				break;
-			else if(ret < 0)
+			if(ret < 0)
 				return ret;
 		} else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
-			if (dmar_remove_dev_scope(info, rmrr->segment,
-				rmrru->devices, rmrru->devices_cnt))
-				break;
+			dmar_remove_dev_scope(info, rmrr->segment,
+				rmrru->devices, rmrru->devices_cnt);
 		}
 	}
 
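The VT-d change removes the early break from the RMRR walk: with the old code, a successful match (ret > 0) stopped the loop, so a second RMRR entry naming the same PCI device was never processed. A small stand-alone sketch of that loop shape follows; the structures and the attach_if_match() helper are illustrative stand-ins, not the driver's API.

/*
 * Hedged sketch (not the driver code): walking every RMRR-like entry
 * instead of stopping at the first match.
 */
#include <stdio.h>

struct rmrr_like {
	const char *name;
	int matches_dev;	/* does this region name our device? */
};

/* Stand-in for the insert-scope step: returns 1 when the entry matched. */
static int attach_if_match(const struct rmrr_like *r)
{
	if (r->matches_dev) {
		printf("attached device to %s\n", r->name);
		return 1;
	}
	return 0;
}

int main(void)
{
	struct rmrr_like rmrrs[] = {
		{ "RMRR A", 1 },
		{ "RMRR B", 1 },	/* second region for the same device */
		{ "RMRR C", 0 },
	};
	int i, ret;

	for (i = 0; i < 3; i++) {
		ret = attach_if_match(&rmrrs[i]);
		if (ret < 0)
			return ret;
		/*
		 * Old behaviour: "if (ret > 0) break;" would stop after
		 * RMRR A, so RMRR B would never be attached.  Walking the
		 * whole list, as the patch does, handles both entries.
		 */
	}
	return 0;
}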