-rw-r--r--  drivers/iommu/Kconfig       |  1
-rw-r--r--  drivers/iommu/intel-iommu.c | 62
2 files changed, 46 insertions, 17 deletions
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index e055d228bfb9..689ffe538370 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -142,7 +142,6 @@ config DMAR_TABLE
 config INTEL_IOMMU
 	bool "Support for Intel IOMMU using DMA Remapping Devices"
 	depends on PCI_MSI && ACPI && (X86 || IA64_GENERIC)
-	select DMA_DIRECT_OPS
 	select IOMMU_API
 	select IOMMU_IOVA
 	select NEED_DMA_MAP_STATE
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 14e4b3722428..b344a883f116 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -31,7 +31,6 @@
 #include <linux/pci.h>
 #include <linux/dmar.h>
 #include <linux/dma-mapping.h>
-#include <linux/dma-direct.h>
 #include <linux/mempool.h>
 #include <linux/memory.h>
 #include <linux/cpu.h>
@@ -3713,30 +3712,61 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
 				  dma_addr_t *dma_handle, gfp_t flags,
 				  unsigned long attrs)
 {
-	void *vaddr;
+	struct page *page = NULL;
+	int order;
 
-	vaddr = dma_direct_alloc(dev, size, dma_handle, flags, attrs);
-	if (iommu_no_mapping(dev) || !vaddr)
-		return vaddr;
+	size = PAGE_ALIGN(size);
+	order = get_order(size);
 
-	*dma_handle = __intel_map_single(dev, virt_to_phys(vaddr),
-			PAGE_ALIGN(size), DMA_BIDIRECTIONAL,
-			dev->coherent_dma_mask);
-	if (!*dma_handle)
-		goto out_free_pages;
-	return vaddr;
+	if (!iommu_no_mapping(dev))
+		flags &= ~(GFP_DMA | GFP_DMA32);
+	else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
+		if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
+			flags |= GFP_DMA;
+		else
+			flags |= GFP_DMA32;
+	}
+
+	if (gfpflags_allow_blocking(flags)) {
+		unsigned int count = size >> PAGE_SHIFT;
+
+		page = dma_alloc_from_contiguous(dev, count, order, flags);
+		if (page && iommu_no_mapping(dev) &&
+		    page_to_phys(page) + size > dev->coherent_dma_mask) {
+			dma_release_from_contiguous(dev, page, count);
+			page = NULL;
+		}
+	}
+
+	if (!page)
+		page = alloc_pages(flags, order);
+	if (!page)
+		return NULL;
+	memset(page_address(page), 0, size);
+
+	*dma_handle = __intel_map_single(dev, page_to_phys(page), size,
+					 DMA_BIDIRECTIONAL,
+					 dev->coherent_dma_mask);
+	if (*dma_handle)
+		return page_address(page);
+	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+		__free_pages(page, order);
 
-out_free_pages:
-	dma_direct_free(dev, size, vaddr, *dma_handle, attrs);
 	return NULL;
 }
 
 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
 				dma_addr_t dma_handle, unsigned long attrs)
 {
-	if (!iommu_no_mapping(dev))
-		intel_unmap(dev, dma_handle, PAGE_ALIGN(size));
-	dma_direct_free(dev, size, vaddr, dma_handle, attrs);
+	int order;
+	struct page *page = virt_to_page(vaddr);
+
+	size = PAGE_ALIGN(size);
+	order = get_order(size);
+
+	intel_unmap(dev, dma_handle, size);
+	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+		__free_pages(page, order);
 }
 
 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
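Note on the restored allocation path (not part of the patch itself): when the IOMMU is actually translating for the device, intel_alloc_coherent() can use any page, so it clears GFP_DMA/GFP_DMA32; only in the identity-mapped (iommu_no_mapping) case must the physical address fit dev->coherent_dma_mask, so a zone hint is added when that mask is smaller than dma_get_required_mask(). A minimal standalone sketch of that zone-selection decision follows; pick_zone(), enum zone_hint and bit_mask() are illustrative stand-ins, not kernel APIs.

/*
 * Illustrative user-space sketch (not kernel code) of the GFP zone
 * selection performed by the restored intel_alloc_coherent() above.
 * pick_zone(), zone_hint and bit_mask() are hypothetical helpers.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* e.g. bit_mask(32) == 0xffffffff, mirroring the kernel's DMA_BIT_MASK() */
static uint64_t bit_mask(unsigned int bits)
{
	return bits >= 64 ? ~0ULL : (1ULL << bits) - 1;
}

enum zone_hint { ZONE_ANY, ZONE_DMA32, ZONE_DMA };

static enum zone_hint pick_zone(bool iommu_bypassed,
				uint64_t coherent_dma_mask,
				uint64_t required_mask)
{
	if (!iommu_bypassed)
		return ZONE_ANY;	/* IOMMU remaps: GFP_DMA/GFP_DMA32 are stripped */
	if (coherent_dma_mask >= required_mask)
		return ZONE_ANY;	/* mask already covers all present memory */
	if (coherent_dma_mask < bit_mask(32))
		return ZONE_DMA;	/* sub-32-bit mask: add GFP_DMA */
	return ZONE_DMA32;		/* 32-bit-capable device: add GFP_DMA32 */
}

int main(void)
{
	/* a 32-bit device in identity-mapped mode on a machine with >4G RAM */
	enum zone_hint hint = pick_zone(true, bit_mask(32), bit_mask(40));

	printf("%s\n", hint == ZONE_DMA32 ? "GFP_DMA32" :
	       hint == ZONE_DMA ? "GFP_DMA" : "no zone flag");
	return 0;
}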