about summary refs log tree commit diff stats
path: root/drivers/iommu
diff options
context:
space:
mode:
authorJoerg Roedel <jroedel@suse.de>2015-04-01 08:58:47 -0400
committerJoerg Roedel <jroedel@suse.de>2015-04-02 07:31:07 -0400
commit3b839a57998515bb44c091bbcb8ea0da9d2adef4 (patch)
tree5b2a44993b8308f9d90305abdfd1b135b0d105c1 /drivers/iommu
parent5fc872c7323534e8f7dc21bab635e7a9b9659e07 (diff)
iommu/amd: Add support for contiguous dma allocator
Add code to allocate memory from the contiguous memory allocator to support coherent allocations larger than 8MB.

Tested-by: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Diffstat (limited to 'drivers/iommu')
-rw-r--r-- drivers/iommu/amd_iommu.c | 44
1 file changed, 28 insertions(+), 16 deletions(-)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index c2e6f13d148f..49ecf003f7ca 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -33,6 +33,7 @@
33#include <linux/export.h> 33#include <linux/export.h>
34#include <linux/irq.h> 34#include <linux/irq.h>
35#include <linux/msi.h> 35#include <linux/msi.h>
36#include <linux/dma-contiguous.h>
36#include <asm/irq_remapping.h> 37#include <asm/irq_remapping.h>
37#include <asm/io_apic.h> 38#include <asm/io_apic.h>
38#include <asm/apic.h> 39#include <asm/apic.h>
@@ -2913,37 +2914,42 @@ static void *alloc_coherent(struct device *dev, size_t size,
2913 dma_addr_t *dma_addr, gfp_t flag, 2914 dma_addr_t *dma_addr, gfp_t flag,
2914 struct dma_attrs *attrs) 2915 struct dma_attrs *attrs)
2915{ 2916{
2916 unsigned long flags;
2917 void *virt_addr;
2918 struct protection_domain *domain;
2919 phys_addr_t paddr;
2920 u64 dma_mask = dev->coherent_dma_mask; 2917 u64 dma_mask = dev->coherent_dma_mask;
2918 struct protection_domain *domain;
2919 unsigned long flags;
2920 struct page *page;
2921 2921
2922 INC_STATS_COUNTER(cnt_alloc_coherent); 2922 INC_STATS_COUNTER(cnt_alloc_coherent);
2923 2923
2924 domain = get_domain(dev); 2924 domain = get_domain(dev);
2925 if (PTR_ERR(domain) == -EINVAL) { 2925 if (PTR_ERR(domain) == -EINVAL) {
2926 virt_addr = (void *)__get_free_pages(flag, get_order(size)); 2926 page = alloc_pages(flag, get_order(size));
2927 *dma_addr = __pa(virt_addr); 2927 *dma_addr = page_to_phys(page);
2928 return virt_addr; 2928 return page_address(page);
2929 } else if (IS_ERR(domain)) 2929 } else if (IS_ERR(domain))
2930 return NULL; 2930 return NULL;
2931 2931
2932 size = PAGE_ALIGN(size);
2932 dma_mask = dev->coherent_dma_mask; 2933 dma_mask = dev->coherent_dma_mask;
2933 flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); 2934 flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
2934 2935
2935 virt_addr = (void *)__get_free_pages(flag, get_order(size)); 2936 page = alloc_pages(flag | __GFP_NOWARN, get_order(size));
2936 if (!virt_addr) 2937 if (!page) {
2937 return NULL; 2938 if (!(flag & __GFP_WAIT))
2939 return NULL;
2938 2940
2939 paddr = virt_to_phys(virt_addr); 2941 page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
2942 get_order(size));
2943 if (!page)
2944 return NULL;
2945 }
2940 2946
2941 if (!dma_mask) 2947 if (!dma_mask)
2942 dma_mask = *dev->dma_mask; 2948 dma_mask = *dev->dma_mask;
2943 2949
2944 spin_lock_irqsave(&domain->lock, flags); 2950 spin_lock_irqsave(&domain->lock, flags);
2945 2951
2946 *dma_addr = __map_single(dev, domain->priv, paddr, 2952 *dma_addr = __map_single(dev, domain->priv, page_to_phys(page),
2947 size, DMA_BIDIRECTIONAL, true, dma_mask); 2953 size, DMA_BIDIRECTIONAL, true, dma_mask);
2948 2954
2949 if (*dma_addr == DMA_ERROR_CODE) { 2955 if (*dma_addr == DMA_ERROR_CODE) {
@@ -2955,11 +2961,12 @@ static void *alloc_coherent(struct device *dev, size_t size,
2955 2961
2956 spin_unlock_irqrestore(&domain->lock, flags); 2962 spin_unlock_irqrestore(&domain->lock, flags);
2957 2963
2958 return virt_addr; 2964 return page_address(page);
2959 2965
2960out_free: 2966out_free:
2961 2967
2962 free_pages((unsigned long)virt_addr, get_order(size)); 2968 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
2969 __free_pages(page, get_order(size));
2963 2970
2964 return NULL; 2971 return NULL;
2965} 2972}
@@ -2971,11 +2978,15 @@ static void free_coherent(struct device *dev, size_t size,
2971 void *virt_addr, dma_addr_t dma_addr, 2978 void *virt_addr, dma_addr_t dma_addr,
2972 struct dma_attrs *attrs) 2979 struct dma_attrs *attrs)
2973{ 2980{
2974 unsigned long flags;
2975 struct protection_domain *domain; 2981 struct protection_domain *domain;
2982 unsigned long flags;
2983 struct page *page;
2976 2984
2977 INC_STATS_COUNTER(cnt_free_coherent); 2985 INC_STATS_COUNTER(cnt_free_coherent);
2978 2986
2987 page = virt_to_page(virt_addr);
2988 size = PAGE_ALIGN(size);
2989
2979 domain = get_domain(dev); 2990 domain = get_domain(dev);
2980 if (IS_ERR(domain)) 2991 if (IS_ERR(domain))
2981 goto free_mem; 2992 goto free_mem;
@@ -2989,7 +3000,8 @@ static void free_coherent(struct device *dev, size_t size,
2989 spin_unlock_irqrestore(&domain->lock, flags); 3000 spin_unlock_irqrestore(&domain->lock, flags);
2990 3001
2991free_mem: 3002free_mem:
2992 free_pages((unsigned long)virt_addr, get_order(size)); 3003 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3004 __free_pages(page, get_order(size));
2993} 3005}
2994 3006
2995/* 3007/*