diff options
author | Christoph Hellwig <hch@lst.de> | 2018-01-09 17:40:57 -0500 |
---|---|---|
committer | Christoph Hellwig <hch@lst.de> | 2018-01-15 03:35:13 -0500 |
commit | 95f183916d4b0bc1943684948ecdd2469f1aa978 (patch) | |
tree | 4f35e71033398e1ac96e90454a84f1ad2402492e | |
parent | c61e9637340e1d0672c6f0c0b11aa81d7262c49d (diff) |
dma-direct: retry allocations using GFP_DMA for small masks
If an attempt to allocate memory succeeds but the resulting memory is
not inside the supported DMA mask, retry the allocation with GFP_DMA
set as a last resort.
Based on the x86 code, but with an off-by-one error (in what is now
dma_coherent_ok) fixed relative to the x86 version.
Signed-off-by: Christoph Hellwig <hch@lst.de>
-rw-r--r-- | lib/dma-direct.c | 25 |
1 file changed, 24 insertions, 1 deletion
diff --git a/lib/dma-direct.c b/lib/dma-direct.c index 8f76032ebc3c..4e43c2bb7f5f 100644 --- a/lib/dma-direct.c +++ b/lib/dma-direct.c | |||
@@ -35,6 +35,11 @@ check_addr(struct device *dev, dma_addr_t dma_addr, size_t size, | |||
35 | return true; | 35 | return true; |
36 | } | 36 | } |
37 | 37 | ||
38 | static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size) | ||
39 | { | ||
40 | return phys_to_dma(dev, phys) + size - 1 <= dev->coherent_dma_mask; | ||
41 | } | ||
42 | |||
38 | static void *dma_direct_alloc(struct device *dev, size_t size, | 43 | static void *dma_direct_alloc(struct device *dev, size_t size, |
39 | dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) | 44 | dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) |
40 | { | 45 | { |
@@ -48,11 +53,29 @@ static void *dma_direct_alloc(struct device *dev, size_t size, | |||
48 | if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA)) | 53 | if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA)) |
49 | gfp |= GFP_DMA32; | 54 | gfp |= GFP_DMA32; |
50 | 55 | ||
56 | again: | ||
51 | /* CMA can be used only in the context which permits sleeping */ | 57 | /* CMA can be used only in the context which permits sleeping */ |
52 | if (gfpflags_allow_blocking(gfp)) | 58 | if (gfpflags_allow_blocking(gfp)) { |
53 | page = dma_alloc_from_contiguous(dev, count, page_order, gfp); | 59 | page = dma_alloc_from_contiguous(dev, count, page_order, gfp); |
60 | if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) { | ||
61 | dma_release_from_contiguous(dev, page, count); | ||
62 | page = NULL; | ||
63 | } | ||
64 | } | ||
54 | if (!page) | 65 | if (!page) |
55 | page = alloc_pages_node(dev_to_node(dev), gfp, page_order); | 66 | page = alloc_pages_node(dev_to_node(dev), gfp, page_order); |
67 | |||
68 | if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) { | ||
69 | __free_pages(page, page_order); | ||
70 | page = NULL; | ||
71 | |||
72 | if (dev->coherent_dma_mask < DMA_BIT_MASK(32) && | ||
73 | !(gfp & GFP_DMA)) { | ||
74 | gfp = (gfp & ~GFP_DMA32) | GFP_DMA; | ||
75 | goto again; | ||
76 | } | ||
77 | } | ||
78 | |||
56 | if (!page) | 79 | if (!page) |
57 | return NULL; | 80 | return NULL; |
58 | 81 | ||