Diffstat (limited to 'drivers/iommu/intel-iommu.c')
-rw-r--r--  drivers/iommu/intel-iommu.c | 307
1 file changed, 234 insertions(+), 73 deletions(-)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 02cd26a17fe0..1232336b960e 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -195,6 +195,7 @@ static inline void set_root_present(struct root_entry *root)
 }
 static inline void set_root_value(struct root_entry *root, unsigned long value)
 {
+	root->val &= ~VTD_PAGE_MASK;
 	root->val |= value & VTD_PAGE_MASK;
 }
 
@@ -247,6 +248,7 @@ static inline void context_set_translation_type(struct context_entry *context,
 static inline void context_set_address_root(struct context_entry *context,
 					    unsigned long value)
 {
+	context->lo &= ~VTD_PAGE_MASK;
 	context->lo |= value & VTD_PAGE_MASK;
 }
 
@@ -328,17 +330,10 @@ static int hw_pass_through = 1;
 /* si_domain contains mulitple devices */
 #define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 1)
 
-/* define the limit of IOMMUs supported in each domain */
-#ifdef CONFIG_X86
-# define	IOMMU_UNITS_SUPPORTED	MAX_IO_APICS
-#else
-# define	IOMMU_UNITS_SUPPORTED	64
-#endif
-
 struct dmar_domain {
 	int	id;			/* domain id */
 	int	nid;			/* node id */
-	DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED);
+	DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
 					/* bitmap of iommus this domain uses*/
 
 	struct list_head devices;	/* all devices' list */
@@ -1132,8 +1127,11 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
 	unsigned long flags;
 
 	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
-	if (!root)
+	if (!root) {
+		pr_err("IOMMU: allocating root entry for %s failed\n",
+			iommu->name);
 		return -ENOMEM;
+	}
 
 	__iommu_flush_cache(iommu, root, ROOT_SIZE);
 
@@ -1473,7 +1471,7 @@ static int iommu_init_domains(struct intel_iommu *iommu)
 	return 0;
 }
 
-static void free_dmar_iommu(struct intel_iommu *iommu)
+static void disable_dmar_iommu(struct intel_iommu *iommu)
 {
 	struct dmar_domain *domain;
 	int i;
@@ -1497,11 +1495,16 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
 
 	if (iommu->gcmd & DMA_GCMD_TE)
 		iommu_disable_translation(iommu);
+}
 
-	kfree(iommu->domains);
-	kfree(iommu->domain_ids);
-	iommu->domains = NULL;
-	iommu->domain_ids = NULL;
+static void free_dmar_iommu(struct intel_iommu *iommu)
+{
+	if ((iommu->domains) && (iommu->domain_ids)) {
+		kfree(iommu->domains);
+		kfree(iommu->domain_ids);
+		iommu->domains = NULL;
+		iommu->domain_ids = NULL;
+	}
 
 	g_iommus[iommu->seq_id] = NULL;
 
@@ -1983,7 +1986,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 {
 	struct dma_pte *first_pte = NULL, *pte = NULL;
 	phys_addr_t uninitialized_var(pteval);
-	unsigned long sg_res;
+	unsigned long sg_res = 0;
 	unsigned int largepage_lvl = 0;
 	unsigned long lvl_pages = 0;
 
@@ -1994,10 +1997,8 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 
 	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
 
-	if (sg)
-		sg_res = 0;
-	else {
-		sg_res = nr_pages + 1;
+	if (!sg) {
+		sg_res = nr_pages;
 		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
 	}
 
@@ -2708,6 +2709,41 @@ static int __init iommu_prepare_static_identity_mapping(int hw)
 	return 0;
 }
 
+static void intel_iommu_init_qi(struct intel_iommu *iommu)
+{
+	/*
+	 * Start from the sane iommu hardware state.
+	 * If the queued invalidation is already initialized by us
+	 * (for example, while enabling interrupt-remapping) then
+	 * we got the things already rolling from a sane state.
+	 */
+	if (!iommu->qi) {
+		/*
+		 * Clear any previous faults.
+		 */
+		dmar_fault(-1, iommu);
+		/*
+		 * Disable queued invalidation if supported and already enabled
+		 * before OS handover.
+		 */
+		dmar_disable_qi(iommu);
+	}
+
+	if (dmar_enable_qi(iommu)) {
+		/*
+		 * Queued Invalidate not enabled, use Register Based Invalidate
+		 */
+		iommu->flush.flush_context = __iommu_flush_context;
+		iommu->flush.flush_iotlb = __iommu_flush_iotlb;
+		pr_info("IOMMU: %s using Register based invalidation\n",
+			iommu->name);
+	} else {
+		iommu->flush.flush_context = qi_flush_context;
+		iommu->flush.flush_iotlb = qi_flush_iotlb;
+		pr_info("IOMMU: %s using Queued invalidation\n", iommu->name);
+	}
+}
+
 static int __init init_dmars(void)
 {
 	struct dmar_drhd_unit *drhd;
@@ -2728,14 +2764,18 @@ static int __init init_dmars(void)
 		 * threaded kernel __init code path all other access are read
 		 * only
 		 */
-		if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
+		if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
 			g_num_of_iommus++;
 			continue;
 		}
 		printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
-			IOMMU_UNITS_SUPPORTED);
+			DMAR_UNITS_SUPPORTED);
 	}
 
+	/* Preallocate enough resources for IOMMU hot-addition */
+	if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
+		g_num_of_iommus = DMAR_UNITS_SUPPORTED;
+
 	g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
 			GFP_KERNEL);
 	if (!g_iommus) {
@@ -2764,58 +2804,14 @@ static int __init init_dmars(void)
 		 * among all IOMMU's. Need to Split it later.
 		 */
 		ret = iommu_alloc_root_entry(iommu);
-		if (ret) {
-			printk(KERN_ERR "IOMMU: allocate root entry failed\n");
+		if (ret)
 			goto free_iommu;
-		}
 		if (!ecap_pass_through(iommu->ecap))
 			hw_pass_through = 0;
 	}
 
-	/*
-	 * Start from the sane iommu hardware state.
-	 */
-	for_each_active_iommu(iommu, drhd) {
-		/*
-		 * If the queued invalidation is already initialized by us
-		 * (for example, while enabling interrupt-remapping) then
-		 * we got the things already rolling from a sane state.
-		 */
-		if (iommu->qi)
-			continue;
-
-		/*
-		 * Clear any previous faults.
-		 */
-		dmar_fault(-1, iommu);
-		/*
-		 * Disable queued invalidation if supported and already enabled
-		 * before OS handover.
-		 */
-		dmar_disable_qi(iommu);
-	}
-
-	for_each_active_iommu(iommu, drhd) {
-		if (dmar_enable_qi(iommu)) {
-			/*
-			 * Queued Invalidate not enabled, use Register Based
-			 * Invalidate
-			 */
-			iommu->flush.flush_context = __iommu_flush_context;
-			iommu->flush.flush_iotlb = __iommu_flush_iotlb;
-			printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
-			       "invalidation\n",
-				iommu->seq_id,
-			       (unsigned long long)drhd->reg_base_addr);
-		} else {
-			iommu->flush.flush_context = qi_flush_context;
-			iommu->flush.flush_iotlb = qi_flush_iotlb;
-			printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
-			       "invalidation\n",
-				iommu->seq_id,
-			       (unsigned long long)drhd->reg_base_addr);
-		}
-	}
+	for_each_active_iommu(iommu, drhd)
+		intel_iommu_init_qi(iommu);
 
 	if (iommu_pass_through)
 		iommu_identity_mapping |= IDENTMAP_ALL;
@@ -2901,8 +2897,10 @@ static int __init init_dmars(void)
 	return 0;
 
 free_iommu:
-	for_each_active_iommu(iommu, drhd)
+	for_each_active_iommu(iommu, drhd) {
+		disable_dmar_iommu(iommu);
 		free_dmar_iommu(iommu);
+	}
 	kfree(deferred_flush);
 free_g_iommus:
 	kfree(g_iommus);
@@ -3682,7 +3680,7 @@ static inline void init_iommu_pm_ops(void) {}
 #endif	/* CONFIG_PM */
 
 
-int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
+int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
 {
 	struct acpi_dmar_reserved_memory *rmrr;
 	struct dmar_rmrr_unit *rmrru;
@@ -3708,17 +3706,48 @@ int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
 	return 0;
 }
 
-int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
+static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
+{
+	struct dmar_atsr_unit *atsru;
+	struct acpi_dmar_atsr *tmp;
+
+	list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
+		tmp = (struct acpi_dmar_atsr *)atsru->hdr;
+		if (atsr->segment != tmp->segment)
+			continue;
+		if (atsr->header.length != tmp->header.length)
+			continue;
+		if (memcmp(atsr, tmp, atsr->header.length) == 0)
+			return atsru;
+	}
+
+	return NULL;
+}
+
+int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
 {
 	struct acpi_dmar_atsr *atsr;
 	struct dmar_atsr_unit *atsru;
 
+	if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
+		return 0;
+
 	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
-	atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
+	atsru = dmar_find_atsr(atsr);
+	if (atsru)
+		return 0;
+
+	atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
 	if (!atsru)
 		return -ENOMEM;
 
-	atsru->hdr = hdr;
+	/*
+	 * If memory is allocated from slab by ACPI _DSM method, we need to
+	 * copy the memory content because the memory buffer will be freed
+	 * on return.
+	 */
+	atsru->hdr = (void *)(atsru + 1);
+	memcpy(atsru->hdr, hdr, hdr->length);
 	atsru->include_all = atsr->flags & 0x1;
 	if (!atsru->include_all) {
 		atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
@@ -3741,6 +3770,138 @@ static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
 	kfree(atsru);
 }
 
+int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
+{
+	struct acpi_dmar_atsr *atsr;
+	struct dmar_atsr_unit *atsru;
+
+	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
+	atsru = dmar_find_atsr(atsr);
+	if (atsru) {
+		list_del_rcu(&atsru->list);
+		synchronize_rcu();
+		intel_iommu_free_atsr(atsru);
+	}
+
+	return 0;
+}
+
+int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
+{
+	int i;
+	struct device *dev;
+	struct acpi_dmar_atsr *atsr;
+	struct dmar_atsr_unit *atsru;
+
+	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
+	atsru = dmar_find_atsr(atsr);
+	if (!atsru)
+		return 0;
+
+	if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
+		for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
+					  i, dev)
+			return -EBUSY;
+
+	return 0;
+}
+
+static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
+{
+	int sp, ret = 0;
+	struct intel_iommu *iommu = dmaru->iommu;
+
+	if (g_iommus[iommu->seq_id])
+		return 0;
+
+	if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
+		pr_warn("IOMMU: %s doesn't support hardware pass through.\n",
+			iommu->name);
+		return -ENXIO;
+	}
+	if (!ecap_sc_support(iommu->ecap) &&
+	    domain_update_iommu_snooping(iommu)) {
+		pr_warn("IOMMU: %s doesn't support snooping.\n",
+			iommu->name);
+		return -ENXIO;
+	}
+	sp = domain_update_iommu_superpage(iommu) - 1;
+	if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
+		pr_warn("IOMMU: %s doesn't support large page.\n",
+			iommu->name);
+		return -ENXIO;
+	}
+
+	/*
+	 * Disable translation if already enabled prior to OS handover.
+	 */
+	if (iommu->gcmd & DMA_GCMD_TE)
+		iommu_disable_translation(iommu);
+
+	g_iommus[iommu->seq_id] = iommu;
+	ret = iommu_init_domains(iommu);
+	if (ret == 0)
+		ret = iommu_alloc_root_entry(iommu);
+	if (ret)
+		goto out;
+
+	if (dmaru->ignored) {
+		/*
+		 * we always have to disable PMRs or DMA may fail on this device
+		 */
+		if (force_on)
+			iommu_disable_protect_mem_regions(iommu);
+		return 0;
+	}
+
+	intel_iommu_init_qi(iommu);
+	iommu_flush_write_buffer(iommu);
+	ret = dmar_set_interrupt(iommu);
+	if (ret)
+		goto disable_iommu;
+
+	iommu_set_root_entry(iommu);
+	iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
+	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
+	iommu_enable_translation(iommu);
+
+	if (si_domain) {
+		ret = iommu_attach_domain(si_domain, iommu);
+		if (ret < 0 || si_domain->id != ret)
+			goto disable_iommu;
+		domain_attach_iommu(si_domain, iommu);
+	}
+
+	iommu_disable_protect_mem_regions(iommu);
+	return 0;
+
+disable_iommu:
+	disable_dmar_iommu(iommu);
+out:
+	free_dmar_iommu(iommu);
+	return ret;
+}
+
+int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
+{
+	int ret = 0;
+	struct intel_iommu *iommu = dmaru->iommu;
+
+	if (!intel_iommu_enabled)
+		return 0;
+	if (iommu == NULL)
+		return -EINVAL;
+
+	if (insert) {
+		ret = intel_iommu_add(dmaru);
+	} else {
+		disable_dmar_iommu(iommu);
+		free_dmar_iommu(iommu);
+	}
+
+	return ret;
+}
+
 static void intel_iommu_free_dmars(void)
 {
 	struct dmar_rmrr_unit *rmrru, *rmrr_n;