aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/iommu
diff options
context:
space:
mode:
authorJoerg Roedel <jroedel@suse.de>2014-05-20 17:18:26 -0400
committerJoerg Roedel <jroedel@suse.de>2014-05-26 05:28:14 -0400
commite79df31c60ea79954e854616da233e1b8c6475ab (patch)
tree57c49b6f683f2b6546b6c13821cba577a8e2f7e8 /drivers/iommu
parent9163b9013542a688fe4152985118a9c46e2d255d (diff)
iommu/amd: Handle parallel invalidate_range_start/end calls correctly
Add a counter to the pasid_state so that we do not restore the original page-table before all invalidate_range_start to invalidate_range_end sections have finished. Signed-off-by: Joerg Roedel <jroedel@suse.de>
Diffstat (limited to 'drivers/iommu')
-rw-r--r--drivers/iommu/amd_iommu_v2.c17
1 file changed, 13 insertions(+), 4 deletions(-)
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index c65f3ec60bec..d4daa05efe60 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -45,6 +45,8 @@ struct pri_queue {
 struct pasid_state {
 	struct list_head list;			/* For global state-list */
 	atomic_t count;				/* Reference count */
+	atomic_t mmu_notifier_count;		/* Counting nested mmu_notifier
+						   calls */
 	struct task_struct *task;		/* Task bound to this PASID */
 	struct mm_struct *mm;			/* mm_struct for the faults */
 	struct mmu_notifier mn;			/* mmu_otifier handle */
@@ -433,8 +435,11 @@ static void mn_invalidate_range_start(struct mmu_notifier *mn,
 	pasid_state = mn_to_state(mn);
 	dev_state   = pasid_state->device_state;
 
-	amd_iommu_domain_set_gcr3(dev_state->domain, pasid_state->pasid,
-				  __pa(empty_page_table));
+	if (atomic_add_return(1, &pasid_state->mmu_notifier_count) == 1) {
+		amd_iommu_domain_set_gcr3(dev_state->domain,
+					  pasid_state->pasid,
+					  __pa(empty_page_table));
+	}
 }
 
 static void mn_invalidate_range_end(struct mmu_notifier *mn,
@@ -447,8 +452,11 @@ static void mn_invalidate_range_end(struct mmu_notifier *mn,
 	pasid_state = mn_to_state(mn);
 	dev_state   = pasid_state->device_state;
 
-	amd_iommu_domain_set_gcr3(dev_state->domain, pasid_state->pasid,
-				  __pa(pasid_state->mm->pgd));
+	if (atomic_dec_and_test(&pasid_state->mmu_notifier_count)) {
+		amd_iommu_domain_set_gcr3(dev_state->domain,
+					  pasid_state->pasid,
+					  __pa(pasid_state->mm->pgd));
+	}
 }
 
 static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
@@ -642,6 +650,7 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
 		goto out;
 
 	atomic_set(&pasid_state->count, 1);
+	atomic_set(&pasid_state->mmu_notifier_count, 0);
 	init_waitqueue_head(&pasid_state->wq);
 	spin_lock_init(&pasid_state->lock);
 