author     Jiang Liu <jiang.liu@linux.intel.com>    2014-11-09 09:48:02 -0500
committer  Joerg Roedel <jroedel@suse.de>           2014-11-18 05:18:36 -0500
commit     ffebeb46dd34736c90ffbca1ccb0bef8f4827c44
tree       3dde0f7830b8cb39ee0e229523d6d15aa4f88dce
parent     51acce33c4df6ee23b5ad4c2e6c239e0d6f25771
iommu/vt-d: Enhance intel-iommu driver to support DMAR unit hotplug
Implement the callback functions required for the intel-iommu driver
to support DMAR unit hotplug.
Signed-off-by: Jiang Liu <jiang.liu@linux.intel.com>
Reviewed-by: Yijing Wang <wangyijing@huawei.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
-rw-r--r--   drivers/iommu/intel-iommu.c   206
1 file changed, 151 insertions(+), 55 deletions(-)
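For context, this patch turns dmar_iommu_hotplug() from a stub into the driver-level entry point for DMAR unit hotplug: on insertion it calls the new intel_iommu_add() to bring the unit fully on line, and on removal it tears the unit down with disable_dmar_iommu() followed by free_dmar_iommu(). The sketch below illustrates how a caller in the DMAR hotplug core might drive that entry point. It is a minimal, hypothetical sketch: the wrapper handle_dmar_unit_hotplug() is not part of this patch; only dmar_iommu_hotplug() and its (dmaru, insert) signature come from the diff.

#include <linux/dmar.h>		/* struct dmar_drhd_unit; dmar_iommu_hotplug() assumed declared here */
#include <linux/printk.h>

/* Hypothetical caller sketch, not part of this patch. */
static int handle_dmar_unit_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
	int ret;

	/*
	 * insert == true:  hot-add; intel_iommu_add() sets up domains, the
	 *                  root entry, QI, interrupts and translation.
	 * insert == false: hot-removal; the unit is disabled and freed.
	 */
	ret = dmar_iommu_hotplug(dmaru, insert);
	if (ret)
		pr_warn("DMAR unit %s failed: %d\n",
			insert ? "hot-add" : "hot-remove", ret);
	return ret;
}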
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 7c49ab51904f..99bf651234a6 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1127,8 +1127,11 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
 	unsigned long flags;
 
 	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
-	if (!root)
+	if (!root) {
+		pr_err("IOMMU: allocating root entry for %s failed\n",
+			iommu->name);
 		return -ENOMEM;
+	}
 
 	__iommu_flush_cache(iommu, root, ROOT_SIZE);
 
@@ -1468,7 +1471,7 @@ static int iommu_init_domains(struct intel_iommu *iommu)
 	return 0;
 }
 
-static void free_dmar_iommu(struct intel_iommu *iommu)
+static void disable_dmar_iommu(struct intel_iommu *iommu)
 {
 	struct dmar_domain *domain;
 	int i;
@@ -1492,11 +1495,16 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
 
 	if (iommu->gcmd & DMA_GCMD_TE)
 		iommu_disable_translation(iommu);
+}
 
-	kfree(iommu->domains);
-	kfree(iommu->domain_ids);
-	iommu->domains = NULL;
-	iommu->domain_ids = NULL;
+static void free_dmar_iommu(struct intel_iommu *iommu)
+{
+	if ((iommu->domains) && (iommu->domain_ids)) {
+		kfree(iommu->domains);
+		kfree(iommu->domain_ids);
+		iommu->domains = NULL;
+		iommu->domain_ids = NULL;
+	}
 
 	g_iommus[iommu->seq_id] = NULL;
 
@@ -2703,6 +2711,41 @@ static int __init iommu_prepare_static_identity_mapping(int hw)
 	return 0;
 }
 
+static void intel_iommu_init_qi(struct intel_iommu *iommu)
+{
+	/*
+	 * Start from the sane iommu hardware state.
+	 * If the queued invalidation is already initialized by us
+	 * (for example, while enabling interrupt-remapping) then
+	 * we got the things already rolling from a sane state.
+	 */
+	if (!iommu->qi) {
+		/*
+		 * Clear any previous faults.
+		 */
+		dmar_fault(-1, iommu);
+		/*
+		 * Disable queued invalidation if supported and already enabled
+		 * before OS handover.
+		 */
+		dmar_disable_qi(iommu);
+	}
+
+	if (dmar_enable_qi(iommu)) {
+		/*
+		 * Queued Invalidate not enabled, use Register Based Invalidate
+		 */
+		iommu->flush.flush_context = __iommu_flush_context;
+		iommu->flush.flush_iotlb = __iommu_flush_iotlb;
+		pr_info("IOMMU: %s using Register based invalidation\n",
+			iommu->name);
+	} else {
+		iommu->flush.flush_context = qi_flush_context;
+		iommu->flush.flush_iotlb = qi_flush_iotlb;
+		pr_info("IOMMU: %s using Queued invalidation\n", iommu->name);
+	}
+}
+
 static int __init init_dmars(void)
 {
 	struct dmar_drhd_unit *drhd;
@@ -2731,6 +2774,10 @@ static int __init init_dmars(void)
 			DMAR_UNITS_SUPPORTED);
 	}
 
+	/* Preallocate enough resources for IOMMU hot-addition */
+	if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
+		g_num_of_iommus = DMAR_UNITS_SUPPORTED;
+
 	g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
 			GFP_KERNEL);
 	if (!g_iommus) {
@@ -2759,58 +2806,14 @@ static int __init init_dmars(void)
		 * among all IOMMU's. Need to Split it later.
		 */
		ret = iommu_alloc_root_entry(iommu);
-		if (ret) {
-			printk(KERN_ERR "IOMMU: allocate root entry failed\n");
+		if (ret)
 			goto free_iommu;
-		}
		if (!ecap_pass_through(iommu->ecap))
			hw_pass_through = 0;
	}
 
-	/*
-	 * Start from the sane iommu hardware state.
-	 */
-	for_each_active_iommu(iommu, drhd) {
-		/*
-		 * If the queued invalidation is already initialized by us
-		 * (for example, while enabling interrupt-remapping) then
-		 * we got the things already rolling from a sane state.
-		 */
-		if (iommu->qi)
-			continue;
-
-		/*
-		 * Clear any previous faults.
-		 */
-		dmar_fault(-1, iommu);
-		/*
-		 * Disable queued invalidation if supported and already enabled
-		 * before OS handover.
-		 */
-		dmar_disable_qi(iommu);
-	}
-
-	for_each_active_iommu(iommu, drhd) {
-		if (dmar_enable_qi(iommu)) {
-			/*
-			 * Queued Invalidate not enabled, use Register Based
-			 * Invalidate
-			 */
-			iommu->flush.flush_context = __iommu_flush_context;
-			iommu->flush.flush_iotlb = __iommu_flush_iotlb;
-			printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
-			       "invalidation\n",
-				iommu->seq_id,
-			       (unsigned long long)drhd->reg_base_addr);
-		} else {
-			iommu->flush.flush_context = qi_flush_context;
-			iommu->flush.flush_iotlb = qi_flush_iotlb;
-			printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
-			       "invalidation\n",
-				iommu->seq_id,
-			       (unsigned long long)drhd->reg_base_addr);
-		}
-	}
+	for_each_active_iommu(iommu, drhd)
+		intel_iommu_init_qi(iommu);
 
	if (iommu_pass_through)
		iommu_identity_mapping |= IDENTMAP_ALL;
@@ -2896,8 +2899,10 @@ static int __init init_dmars(void)
 	return 0;
 
 free_iommu:
-	for_each_active_iommu(iommu, drhd)
+	for_each_active_iommu(iommu, drhd) {
+		disable_dmar_iommu(iommu);
 		free_dmar_iommu(iommu);
+	}
 	kfree(deferred_flush);
 free_g_iommus:
 	kfree(g_iommus);
@@ -3803,9 +3808,100 @@ int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
 	return 0;
 }
 
+static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
+{
+	int sp, ret = 0;
+	struct intel_iommu *iommu = dmaru->iommu;
+
+	if (g_iommus[iommu->seq_id])
+		return 0;
+
+	if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
+		pr_warn("IOMMU: %s doesn't support hardware pass through.\n",
+			iommu->name);
+		return -ENXIO;
+	}
+	if (!ecap_sc_support(iommu->ecap) &&
+	    domain_update_iommu_snooping(iommu)) {
+		pr_warn("IOMMU: %s doesn't support snooping.\n",
+			iommu->name);
+		return -ENXIO;
+	}
+	sp = domain_update_iommu_superpage(iommu) - 1;
+	if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
+		pr_warn("IOMMU: %s doesn't support large page.\n",
+			iommu->name);
+		return -ENXIO;
+	}
+
+	/*
+	 * Disable translation if already enabled prior to OS handover.
+	 */
+	if (iommu->gcmd & DMA_GCMD_TE)
+		iommu_disable_translation(iommu);
+
+	g_iommus[iommu->seq_id] = iommu;
+	ret = iommu_init_domains(iommu);
+	if (ret == 0)
+		ret = iommu_alloc_root_entry(iommu);
+	if (ret)
+		goto out;
+
+	if (dmaru->ignored) {
+		/*
+		 * we always have to disable PMRs or DMA may fail on this device
+		 */
+		if (force_on)
+			iommu_disable_protect_mem_regions(iommu);
+		return 0;
+	}
+
+	intel_iommu_init_qi(iommu);
+	iommu_flush_write_buffer(iommu);
+	ret = dmar_set_interrupt(iommu);
+	if (ret)
+		goto disable_iommu;
+
+	iommu_set_root_entry(iommu);
+	iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
+	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
+	iommu_enable_translation(iommu);
+
+	if (si_domain) {
+		ret = iommu_attach_domain(si_domain, iommu);
+		if (ret < 0 || si_domain->id != ret)
+			goto disable_iommu;
+		domain_attach_iommu(si_domain, iommu);
+	}
+
+	iommu_disable_protect_mem_regions(iommu);
+	return 0;
+
+disable_iommu:
+	disable_dmar_iommu(iommu);
+out:
+	free_dmar_iommu(iommu);
+	return ret;
+}
+
 int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
 {
-	return intel_iommu_enabled ? -ENOSYS : 0;
+	int ret = 0;
+	struct intel_iommu *iommu = dmaru->iommu;
+
+	if (!intel_iommu_enabled)
+		return 0;
+	if (iommu == NULL)
+		return -EINVAL;
+
+	if (insert) {
+		ret = intel_iommu_add(dmaru);
+	} else {
+		disable_dmar_iommu(iommu);
+		free_dmar_iommu(iommu);
+	}
+
+	return ret;
 }
 
 static void intel_iommu_free_dmars(void)
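One design note, based on my reading of the diff rather than on the commit message: the old free_dmar_iommu() is split so that hardware quiescing (disable_dmar_iommu()) and software teardown (free_dmar_iommu()) can be called separately and in that order, which both the free_iommu: error path in init_dmars() and the hot-removal branch of dmar_iommu_hotplug() now rely on. A minimal sketch of that pairing, using a hypothetical helper name:

/* Hypothetical helper; mirrors the teardown ordering the patch establishes. */
static void intel_iommu_teardown_one(struct intel_iommu *iommu)
{
	disable_dmar_iommu(iommu);	/* stop translation, detach active domains */
	free_dmar_iommu(iommu);		/* free domain bookkeeping, clear the g_iommus[] slot */
}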