author		Jiang Liu <jiang.liu@linux.intel.com>	2014-02-19 01:07:37 -0500
committer	Joerg Roedel <joro@8bytes.org>	2014-03-04 11:51:06 -0500
commit		75f05569d0e51f6332a291c82abbeb7c8262e32d (patch)
tree		24eeca4b270e4f0c3ca857c1993b109e685f1588 /drivers
parent		2e45528930388658603ea24d49cf52867b928d3e (diff)
iommu/vt-d: Update IOMMU state when memory hotplug happens
If the static identity domain has been created, the IOMMU driver needs
to update the si_domain page table when a memory hotplug event happens.
Otherwise, PCI device DMA operations can't access the hot-added memory
regions.
Signed-off-by: Jiang Liu <jiang.liu@linux.intel.com>
Signed-off-by: Joerg Roedel <joro@8bytes.org>
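
The patch hooks into the kernel's generic memory-hotplug notification
chain. For context, the pattern it relies on: a driver fills in a
struct notifier_block, registers it with register_memory_notifier(),
and is then called back with events such as MEM_GOING_ONLINE or
MEM_OFFLINE together with a struct memory_notify describing the
affected pfn range. A minimal sketch of that pattern follows
(my_memory_notifier and my_memory_nb are hypothetical names, not part
of this patch):

    #include <linux/memory.h>
    #include <linux/notifier.h>

    /* Hypothetical sketch of the generic memory-hotplug notifier
     * pattern this patch instantiates; names are illustrative. */
    static int my_memory_notifier(struct notifier_block *nb,
                                  unsigned long val, void *v)
    {
            struct memory_notify *mhp = v;  /* pfn range being onlined/offlined */

            switch (val) {
            case MEM_GOING_ONLINE:
                    /* Set up mappings for [mhp->start_pfn,
                     * mhp->start_pfn + mhp->nr_pages). Returning
                     * NOTIFY_BAD here vetoes the online operation. */
                    break;
            case MEM_OFFLINE:
            case MEM_CANCEL_ONLINE:
                    /* Tear the mappings down again. */
                    break;
            }
            return NOTIFY_OK;
    }

    static struct notifier_block my_memory_nb = {
            .notifier_call = my_memory_notifier,
    };

    /* Registered once from the driver's init path:
     *     register_memory_notifier(&my_memory_nb);
     */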
Diffstat (limited to 'drivers')
 -rw-r--r--  drivers/iommu/intel-iommu.c |  71
 -rw-r--r--  drivers/iommu/iova.c        |  64
 2 files changed, 128 insertions(+), 7 deletions(-)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index dd576c067d0d..484d669d2720 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -33,6 +33,7 @@
 #include <linux/dmar.h>
 #include <linux/dma-mapping.h>
 #include <linux/mempool.h>
+#include <linux/memory.h>
 #include <linux/timer.h>
 #include <linux/iova.h>
 #include <linux/iommu.h>
@@ -3683,6 +3684,73 @@ static struct notifier_block device_nb = {
 	.notifier_call = device_notifier,
 };
 
+static int intel_iommu_memory_notifier(struct notifier_block *nb,
+				       unsigned long val, void *v)
+{
+	struct memory_notify *mhp = v;
+	unsigned long long start, end;
+	unsigned long start_vpfn, last_vpfn;
+
+	switch (val) {
+	case MEM_GOING_ONLINE:
+		start = mhp->start_pfn << PAGE_SHIFT;
+		end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
+		if (iommu_domain_identity_map(si_domain, start, end)) {
+			pr_warn("dmar: failed to build identity map for [%llx-%llx]\n",
+				start, end);
+			return NOTIFY_BAD;
+		}
+		break;
+
+	case MEM_OFFLINE:
+	case MEM_CANCEL_ONLINE:
+		start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
+		last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
+		while (start_vpfn <= last_vpfn) {
+			struct iova *iova;
+			struct dmar_drhd_unit *drhd;
+			struct intel_iommu *iommu;
+
+			iova = find_iova(&si_domain->iovad, start_vpfn);
+			if (iova == NULL) {
+				pr_debug("dmar: failed get IOVA for PFN %lx\n",
+					 start_vpfn);
+				break;
+			}
+
+			iova = split_and_remove_iova(&si_domain->iovad, iova,
+						     start_vpfn, last_vpfn);
+			if (iova == NULL) {
+				pr_warn("dmar: failed to split IOVA PFN [%lx-%lx]\n",
+					start_vpfn, last_vpfn);
+				return NOTIFY_BAD;
+			}
+
+			rcu_read_lock();
+			for_each_active_iommu(iommu, drhd)
+				iommu_flush_iotlb_psi(iommu, si_domain->id,
+					iova->pfn_lo,
+					iova->pfn_hi - iova->pfn_lo + 1, 0);
+			rcu_read_unlock();
+			dma_pte_clear_range(si_domain, iova->pfn_lo,
+					    iova->pfn_hi);
+			dma_pte_free_pagetable(si_domain, iova->pfn_lo,
+					       iova->pfn_hi);
+
+			start_vpfn = iova->pfn_hi + 1;
+			free_iova_mem(iova);
+		}
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block intel_iommu_memory_nb = {
+	.notifier_call = intel_iommu_memory_notifier,
+	.priority = 0
+};
+
 int __init intel_iommu_init(void)
 {
 	int ret = -ENODEV;
@@ -3755,8 +3823,9 @@ int __init intel_iommu_init(void)
 	init_iommu_pm_ops();
 
 	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
-
 	bus_register_notifier(&pci_bus_type, &device_nb);
+	if (si_domain && !hw_pass_through)
+		register_memory_notifier(&intel_iommu_memory_nb);
 
 	intel_iommu_enabled = 1;
 
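
To make the MEM_GOING_ONLINE arithmetic above concrete: the notifier
turns the hot-added pfn range into an inclusive physical address range
before building the identity map. A worked example with made-up
values, assuming 4 KiB pages (PAGE_SHIFT == 12); the explicit
unsigned long long casts are this sketch's own guard against 32-bit
truncation, not something the patch itself does:

    /* Illustrative values only. */
    unsigned long start_pfn = 0x100000;  /* first hot-added page frame */
    unsigned long nr_pages  = 0x8000;    /* 0x8000 * 4 KiB = 128 MiB   */

    unsigned long long start =
            (unsigned long long)start_pfn << 12;               /* 0x100000000 */
    unsigned long long end =
            (((unsigned long long)start_pfn + nr_pages) << 12) - 1;
                                                               /* 0x107ffffff */
    /* iommu_domain_identity_map(si_domain, start, end) then maps this
     * inclusive [start, end] physical range 1:1 into si_domain. */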
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 67da6cff74e8..f6b17e6af2fb 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -342,19 +342,30 @@ __is_range_overlap(struct rb_node *node,
 	return 0;
 }
 
+static inline struct iova *
+alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
+{
+	struct iova *iova;
+
+	iova = alloc_iova_mem();
+	if (iova) {
+		iova->pfn_lo = pfn_lo;
+		iova->pfn_hi = pfn_hi;
+	}
+
+	return iova;
+}
+
 static struct iova *
 __insert_new_range(struct iova_domain *iovad,
 		   unsigned long pfn_lo, unsigned long pfn_hi)
 {
 	struct iova *iova;
 
-	iova = alloc_iova_mem();
-	if (!iova)
-		return iova;
-
-	iova->pfn_hi = pfn_hi;
-	iova->pfn_lo = pfn_lo;
-	iova_insert_rbtree(&iovad->rbroot, iova);
+	iova = alloc_and_init_iova(pfn_lo, pfn_hi);
+	if (iova)
+		iova_insert_rbtree(&iovad->rbroot, iova);
 
 	return iova;
 }
 
@@ -433,3 +444,44 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
 	}
 	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
 }
+
+struct iova *
+split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
+		      unsigned long pfn_lo, unsigned long pfn_hi)
+{
+	unsigned long flags;
+	struct iova *prev = NULL, *next = NULL;
+
+	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+	if (iova->pfn_lo < pfn_lo) {
+		prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1);
+		if (prev == NULL)
+			goto error;
+	}
+	if (iova->pfn_hi > pfn_hi) {
+		next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi);
+		if (next == NULL)
+			goto error;
+	}
+
+	__cached_rbnode_delete_update(iovad, iova);
+	rb_erase(&iova->node, &iovad->rbroot);
+
+	if (prev) {
+		iova_insert_rbtree(&iovad->rbroot, prev);
+		iova->pfn_lo = pfn_lo;
+	}
+	if (next) {
+		iova_insert_rbtree(&iovad->rbroot, next);
+		iova->pfn_hi = pfn_hi;
+	}
+	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+
+	return iova;
+
+error:
+	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+	if (prev)
+		free_iova_mem(prev);
+	return NULL;
+}
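
The semantics of the new split_and_remove_iova() helper are easiest to
see with concrete numbers: under the rbtree lock it carves the
requested range out of an existing, possibly larger node, reinserts
whatever sticks out on either side, and hands the trimmed middle back
to the caller, detached from the tree. A sketch with made-up values
(iovad is assumed to be an initialized struct iova_domain):

    /* Illustrative only: suppose the tree contains one node covering
     * pfns [100, 200] and the range to remove is [120, 150]. */
    struct iova *node, *mid;

    node = find_iova(&iovad, 120);      /* returns the [100, 200] node */
    mid  = split_and_remove_iova(&iovad, node, 120, 150);
    /* On success:
     *   - a new node [100, 119] ("prev") is inserted into the tree,
     *   - a new node [151, 200] ("next") is inserted into the tree,
     *   - mid is the original node, trimmed to [120, 150] and removed;
     *     the caller disposes of it with free_iova_mem(mid), as the
     *     memory notifier above does after flushing the IOTLB.
     * If either side allocation fails, the tree is left untouched and
     * NULL is returned. */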