| author | Lu Baolu <baolu.lu@linux.intel.com> | 2019-09-06 02:14:52 -0400 |
| --- | --- | --- |
| committer | Joerg Roedel <jroedel@suse.de> | 2019-09-11 06:34:31 -0400 |
| commit | cfb94a372f2d4ee226247447c863f8709863d170 (patch) | |
| tree | b209631fec06e8a494fd5380ff7b9caf6c236557 /drivers/iommu/intel-iommu.c | |
| parent | 3b53034c268d550d9e8522e613a14ab53b8840d8 (diff) | |
iommu/vt-d: Use bounce buffer for untrusted devices
The Intel VT-d hardware uses paging for DMA remapping, so the minimum
mapped window is one page. Device drivers may map buffers that do not
fill a whole IOMMU page, which allows the device to access possibly
unrelated memory; a malicious device could exploit this to perform DMA
attacks. To address this, the Intel IOMMU driver uses bounce pages for
buffers that don't fill whole IOMMU pages.
Cc: Ashok Raj <ashok.raj@intel.com>
Cc: Jacob Pan <jacob.jun.pan@linux.intel.com>
Cc: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Tested-by: Xu Pengfei <pengfei.xu@intel.com>
Tested-by: Mika Westerberg <mika.westerberg@intel.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Diffstat (limited to 'drivers/iommu/intel-iommu.c')
-rw-r--r--  drivers/iommu/intel-iommu.c | 258
1 file changed, 258 insertions(+), 0 deletions(-)
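
To make the reasoning in the commit message concrete, here is a minimal, self-contained sketch (not part of the patch; VTD_PAGE_SIZE is assumed to be 4 KiB, and needs_bounce()/exposed_bytes() are hypothetical helpers that exist only in this example). It mirrors the IS_ALIGNED(paddr | size, VTD_PAGE_SIZE) test the patch applies in bounce_map_single() and shows how many bytes of unrelated memory a page-granular IOMMU mapping would expose if the buffer were mapped without bouncing:

```c
/*
 * Illustrative sketch only (not from the patch). VTD_PAGE_SIZE is assumed
 * to be 4 KiB; needs_bounce() and exposed_bytes() are hypothetical helpers.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define VTD_PAGE_SIZE	((uint64_t)4096)
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

static bool needs_bounce(uint64_t paddr, size_t size)
{
	/* Bounce unless both the start address and the size are page aligned. */
	return ((paddr | size) & (VTD_PAGE_SIZE - 1)) != 0;
}

static uint64_t exposed_bytes(uint64_t paddr, size_t size)
{
	uint64_t start = paddr & ~(VTD_PAGE_SIZE - 1);
	uint64_t end = ALIGN_UP(paddr + size, VTD_PAGE_SIZE);

	/* Bytes inside the mapped IOMMU window that are not part of the buffer. */
	return (end - start) - size;
}

int main(void)
{
	uint64_t paddr = 0x1200;	/* hypothetical physical address, not page aligned */
	size_t size = 100;

	printf("bounce needed: %d, unrelated bytes exposed without bouncing: %llu\n",
	       needs_bounce(paddr, size),
	       (unsigned long long)exposed_bytes(paddr, size));
	return 0;
}
```

Only buffers whose start or length is not page aligned take the swiotlb bounce path in the patch below; page-aligned buffers are still mapped directly, so the copy overhead is limited to partial-page mappings.
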
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 12831beead02..b034fe727ead 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -41,9 +41,11 @@
 #include <linux/dma-direct.h>
 #include <linux/crash_dump.h>
 #include <linux/numa.h>
+#include <linux/swiotlb.h>
 #include <asm/irq_remapping.h>
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
+#include <trace/events/intel_iommu.h>
 
 #include "irq_remapping.h"
 #include "intel-pasid.h"
@@ -344,6 +346,8 @@ static int domain_detach_iommu(struct dmar_domain *domain,
 static bool device_is_rmrr_locked(struct device *dev);
 static int intel_iommu_attach_device(struct iommu_domain *domain,
 				     struct device *dev);
+static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
+					    dma_addr_t iova);
 
 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
 int dmar_disabled = 0;
@@ -3754,6 +3758,252 @@ static const struct dma_map_ops intel_dma_ops = {
 	.dma_supported = dma_direct_supported,
 };
 
+static void
+bounce_sync_single(struct device *dev, dma_addr_t addr, size_t size,
+		   enum dma_data_direction dir, enum dma_sync_target target)
+{
+	struct dmar_domain *domain;
+	phys_addr_t tlb_addr;
+
+	domain = find_domain(dev);
+	if (WARN_ON(!domain))
+		return;
+
+	tlb_addr = intel_iommu_iova_to_phys(&domain->domain, addr);
+	if (is_swiotlb_buffer(tlb_addr))
+		swiotlb_tbl_sync_single(dev, tlb_addr, size, dir, target);
+}
+
+static dma_addr_t
+bounce_map_single(struct device *dev, phys_addr_t paddr, size_t size,
+		  enum dma_data_direction dir, unsigned long attrs,
+		  u64 dma_mask)
+{
+	size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE);
+	struct dmar_domain *domain;
+	struct intel_iommu *iommu;
+	unsigned long iova_pfn;
+	unsigned long nrpages;
+	phys_addr_t tlb_addr;
+	int prot = 0;
+	int ret;
+
+	domain = find_domain(dev);
+	if (WARN_ON(dir == DMA_NONE || !domain))
+		return DMA_MAPPING_ERROR;
+
+	iommu = domain_get_iommu(domain);
+	if (WARN_ON(!iommu))
+		return DMA_MAPPING_ERROR;
+
+	nrpages = aligned_nrpages(0, size);
+	iova_pfn = intel_alloc_iova(dev, domain,
+				    dma_to_mm_pfn(nrpages), dma_mask);
+	if (!iova_pfn)
+		return DMA_MAPPING_ERROR;
+
+	/*
+	 * Check if DMAR supports zero-length reads on write only
+	 * mappings..
+	 */
+	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
+			!cap_zlr(iommu->cap))
+		prot |= DMA_PTE_READ;
+	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
+		prot |= DMA_PTE_WRITE;
+
+	/*
+	 * If both the physical buffer start address and size are
+	 * page aligned, we don't need to use a bounce page.
+	 */
+	if (!IS_ALIGNED(paddr | size, VTD_PAGE_SIZE)) {
+		tlb_addr = swiotlb_tbl_map_single(dev,
+				__phys_to_dma(dev, io_tlb_start),
+				paddr, size, aligned_size, dir, attrs);
+		if (tlb_addr == DMA_MAPPING_ERROR) {
+			goto swiotlb_error;
+		} else {
+			/* Cleanup the padding area. */
+			void *padding_start = phys_to_virt(tlb_addr);
+			size_t padding_size = aligned_size;
+
+			if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+			    (dir == DMA_TO_DEVICE ||
+			     dir == DMA_BIDIRECTIONAL)) {
+				padding_start += size;
+				padding_size -= size;
+			}
+
+			memset(padding_start, 0, padding_size);
+		}
+	} else {
+		tlb_addr = paddr;
+	}
+
+	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
+				 tlb_addr >> VTD_PAGE_SHIFT, nrpages, prot);
+	if (ret)
+		goto mapping_error;
+
+	trace_bounce_map_single(dev, iova_pfn << PAGE_SHIFT, paddr, size);
+
+	return (phys_addr_t)iova_pfn << PAGE_SHIFT;
+
+mapping_error:
+	if (is_swiotlb_buffer(tlb_addr))
+		swiotlb_tbl_unmap_single(dev, tlb_addr, size,
+					 aligned_size, dir, attrs);
+swiotlb_error:
+	free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
+	dev_err(dev, "Device bounce map: %zx@%llx dir %d --- failed\n",
+		size, (unsigned long long)paddr, dir);
+
+	return DMA_MAPPING_ERROR;
+}
+
+static void
+bounce_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
+		    enum dma_data_direction dir, unsigned long attrs)
+{
+	size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE);
+	struct dmar_domain *domain;
+	phys_addr_t tlb_addr;
+
+	domain = find_domain(dev);
+	if (WARN_ON(!domain))
+		return;
+
+	tlb_addr = intel_iommu_iova_to_phys(&domain->domain, dev_addr);
+	if (WARN_ON(!tlb_addr))
+		return;
+
+	intel_unmap(dev, dev_addr, size);
+	if (is_swiotlb_buffer(tlb_addr))
+		swiotlb_tbl_unmap_single(dev, tlb_addr, size,
+					 aligned_size, dir, attrs);
+
+	trace_bounce_unmap_single(dev, dev_addr, size);
+}
+
+static dma_addr_t
+bounce_map_page(struct device *dev, struct page *page, unsigned long offset,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+	return bounce_map_single(dev, page_to_phys(page) + offset,
+				 size, dir, attrs, *dev->dma_mask);
+}
+
+static dma_addr_t
+bounce_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size,
+		    enum dma_data_direction dir, unsigned long attrs)
+{
+	return bounce_map_single(dev, phys_addr, size,
+				 dir, attrs, *dev->dma_mask);
+}
+
+static void
+bounce_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size,
+		  enum dma_data_direction dir, unsigned long attrs)
+{
+	bounce_unmap_single(dev, dev_addr, size, dir, attrs);
+}
+
+static void
+bounce_unmap_resource(struct device *dev, dma_addr_t dev_addr, size_t size,
+		      enum dma_data_direction dir, unsigned long attrs)
+{
+	bounce_unmap_single(dev, dev_addr, size, dir, attrs);
+}
+
+static void
+bounce_unmap_sg(struct device *dev, struct scatterlist *sglist, int nelems,
+		enum dma_data_direction dir, unsigned long attrs)
+{
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sglist, sg, nelems, i)
+		bounce_unmap_page(dev, sg->dma_address,
+				  sg_dma_len(sg), dir, attrs);
+}
+
+static int
+bounce_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
+	      enum dma_data_direction dir, unsigned long attrs)
+{
+	int i;
+	struct scatterlist *sg;
+
+	for_each_sg(sglist, sg, nelems, i) {
+		sg->dma_address = bounce_map_page(dev, sg_page(sg),
+						  sg->offset, sg->length,
+						  dir, attrs);
+		if (sg->dma_address == DMA_MAPPING_ERROR)
+			goto out_unmap;
+		sg_dma_len(sg) = sg->length;
+	}
+
+	return nelems;
+
+out_unmap:
+	bounce_unmap_sg(dev, sglist, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
+	return 0;
+}
+
+static void
+bounce_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+			   size_t size, enum dma_data_direction dir)
+{
+	bounce_sync_single(dev, addr, size, dir, SYNC_FOR_CPU);
+}
+
+static void
+bounce_sync_single_for_device(struct device *dev, dma_addr_t addr,
+			      size_t size, enum dma_data_direction dir)
+{
+	bounce_sync_single(dev, addr, size, dir, SYNC_FOR_DEVICE);
+}
+
+static void
+bounce_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist,
+		       int nelems, enum dma_data_direction dir)
+{
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sglist, sg, nelems, i)
+		bounce_sync_single(dev, sg_dma_address(sg),
+				   sg_dma_len(sg), dir, SYNC_FOR_CPU);
+}
+
+static void
+bounce_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
+			  int nelems, enum dma_data_direction dir)
+{
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sglist, sg, nelems, i)
+		bounce_sync_single(dev, sg_dma_address(sg),
+				   sg_dma_len(sg), dir, SYNC_FOR_DEVICE);
+}
+
+static const struct dma_map_ops bounce_dma_ops = {
+	.alloc = intel_alloc_coherent,
+	.free = intel_free_coherent,
+	.map_sg = bounce_map_sg,
+	.unmap_sg = bounce_unmap_sg,
+	.map_page = bounce_map_page,
+	.unmap_page = bounce_unmap_page,
+	.sync_single_for_cpu = bounce_sync_single_for_cpu,
+	.sync_single_for_device = bounce_sync_single_for_device,
+	.sync_sg_for_cpu = bounce_sync_sg_for_cpu,
+	.sync_sg_for_device = bounce_sync_sg_for_device,
+	.map_resource = bounce_map_resource,
+	.unmap_resource = bounce_unmap_resource,
+	.dma_supported = dma_direct_supported,
+};
+
 static inline int iommu_domain_cache_init(void)
 {
 	int ret = 0;
@@ -5325,6 +5575,11 @@ static int intel_iommu_add_device(struct device *dev)
 		}
 	}
 
+	if (device_needs_bounce(dev)) {
+		dev_info(dev, "Use Intel IOMMU bounce page dma_ops\n");
+		set_dma_ops(dev, &bounce_dma_ops);
+	}
+
 	return 0;
 }
 
@@ -5342,6 +5597,9 @@ static void intel_iommu_remove_device(struct device *dev)
 	iommu_group_remove_device(dev);
 
 	iommu_device_unlink(&iommu->iommu, dev);
+
+	if (device_needs_bounce(dev))
+		set_dma_ops(dev, NULL);
 }
 
 static void intel_iommu_get_resv_regions(struct device *device,
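
For completeness, a hedged sketch of the consumer side (hypothetical driver fragment, not taken from this patch): once intel_iommu_add_device() has installed bounce_dma_ops for a device that device_needs_bounce() flags as untrusted, the driver's ordinary DMA API calls are routed through bounce_map_page()/bounce_unmap_page() without any driver changes:

```c
/*
 * Hypothetical driver fragment. Assumes 'dev' is the struct device of an
 * untrusted PCI device for which bounce_dma_ops has been installed.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int demo_dma_to_device(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma;

	/* Dispatched through the device's dma_map_ops, i.e. bounce_map_page(). */
	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... hand 'dma' to the device and wait for the transfer ... */

	/* Tears down the IOMMU mapping and releases any bounce slot. */
	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
	return 0;
}
```
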