author		Christoph Hellwig <hch@lst.de>	2018-09-06 20:30:54 -0400
committer	Christoph Hellwig <hch@lst.de>	2018-10-01 10:28:00 -0400
commit		7d21ee4c719f00896767ce19c4c01a56374c2ced (patch)
tree		81ddf922c44d62db19a3048aa1fc0f2f1b2bdd76 /kernel
parent		a20bb058375147cb639c7aa17ef86ad68b32d847 (diff)
dma-direct: refine dma_direct_alloc zone selection

We need to take the DMA offset and encryption bit into account when
selecting a zone. Use the opportunity to factor out the zone selection
into a helper for reuse.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
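For illustration, a minimal user-space sketch of the zone-selection idea follows. It is not the kernel implementation: it assumes a 24-bit ZONE_DMA limit (the common x86 value for ARCH_ZONE_DMA_BITS) and models dma_to_phys() as a simple additive bus offset; the real helper also folds the memory-encryption bit into that translation, which is omitted here. All mock_* names are hypothetical stand-ins.

/*
 * Sketch only: translate the device-visible DMA mask to a physical
 * mask first, then pick the narrowest zone that still covers it.
 */
#include <stdint.h>
#include <stdio.h>

#define MOCK_ZONE_DMA_BITS	24
#define MOCK_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

enum mock_zone { MOCK_ZONE_NORMAL, MOCK_ZONE_DMA32, MOCK_ZONE_DMA };

static enum mock_zone mock_optimal_zone(uint64_t dma_mask, uint64_t bus_offset)
{
	/* dma_to_phys() stand-in: undo the bus offset before comparing */
	uint64_t phys_mask = dma_mask + bus_offset;

	if (phys_mask <= MOCK_BIT_MASK(MOCK_ZONE_DMA_BITS))
		return MOCK_ZONE_DMA;
	if (phys_mask <= MOCK_BIT_MASK(32))
		return MOCK_ZONE_DMA32;
	return MOCK_ZONE_NORMAL;
}

int main(void)
{
	/* A 32-bit DMA mask alone suggests ZONE_DMA32 (prints 1) ... */
	printf("%d\n", mock_optimal_zone(MOCK_BIT_MASK(32), 0));
	/* ... but behind a 1 GiB bus offset the same device reaches
	 * physical addresses above 4 GiB, so no zone restriction is
	 * needed (prints 0). */
	printf("%d\n", mock_optimal_zone(MOCK_BIT_MASK(32), 1ULL << 30));
	return 0;
}

The kernel helper additionally returns the translated mask through *phys_mask so that the retry path in dma_direct_alloc_pages() can reuse it, as the diff below shows.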
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/dma/direct.c | 31 +++++++++++++++++++++----------
1 file changed, 21 insertions(+), 10 deletions(-)
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index f32b33cfa331..e78548397a92 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -69,6 +69,22 @@ u64 dma_direct_get_required_mask(struct device *dev)
 	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
 }
 
+static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
+		u64 *phys_mask)
+{
+	if (force_dma_unencrypted())
+		*phys_mask = __dma_to_phys(dev, dma_mask);
+	else
+		*phys_mask = dma_to_phys(dev, dma_mask);
+
+	/* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */
+	if (*phys_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
+		return GFP_DMA;
+	if (*phys_mask <= DMA_BIT_MASK(32))
+		return GFP_DMA32;
+	return 0;
+}
+
 static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 {
 	return phys_to_dma_direct(dev, phys) + size - 1 <=
@@ -81,17 +97,13 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	int page_order = get_order(size);
 	struct page *page = NULL;
+	u64 phys_mask;
 	void *ret;
 
 	/* we always manually zero the memory once we are done: */
 	gfp &= ~__GFP_ZERO;
-
-	/* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */
-	if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
-		gfp |= GFP_DMA;
-	if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
-		gfp |= GFP_DMA32;
-
+	gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
+			&phys_mask);
 again:
 	/* CMA can be used only in the context which permits sleeping */
 	if (gfpflags_allow_blocking(gfp)) {
@@ -110,15 +122,14 @@ again:
 		page = NULL;
 
 		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
-		    dev->coherent_dma_mask < DMA_BIT_MASK(64) &&
+		    phys_mask < DMA_BIT_MASK(64) &&
 		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
 			gfp |= GFP_DMA32;
 			goto again;
 		}
 
 		if (IS_ENABLED(CONFIG_ZONE_DMA) &&
-		    dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
-		    !(gfp & GFP_DMA)) {
+		    phys_mask < DMA_BIT_MASK(32) && !(gfp & GFP_DMA)) {
 			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
 			goto again;
 		}
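The last two hunks implement a fallback: if the allocated memory is not reachable by the device, it is freed and the allocation retried with a progressively narrower zone. A user-space sketch of that flow follows, under stated assumptions: mock_alloc_ok() is a hypothetical stand-in for the alloc_pages_node()/dma_coherent_ok() pair, and the MOCK_GFP_* values are simplified flags, not the kernel's.

/*
 * Sketch of the narrowing retry loop: optimistic attempt first, then
 * ZONE_DMA32, then ZONE_DMA, mirroring the structure of the patched
 * dma_direct_alloc_pages().  All names here are mock stand-ins.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MOCK_GFP_DMA32		0x1u
#define MOCK_GFP_DMA		0x2u
#define MOCK_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* Pretend the device is only satisfied once we ask for ZONE_DMA. */
static bool mock_alloc_ok(unsigned int gfp)
{
	return gfp & MOCK_GFP_DMA;
}

static bool mock_alloc(uint64_t phys_mask, unsigned int gfp)
{
again:
	if (mock_alloc_ok(gfp))
		return true;

	/* First fallback: add ZONE_DMA32 if no zone was restricted yet. */
	if (phys_mask < MOCK_BIT_MASK(64) &&
	    !(gfp & (MOCK_GFP_DMA32 | MOCK_GFP_DMA))) {
		gfp |= MOCK_GFP_DMA32;
		goto again;
	}

	/* Second fallback: swap ZONE_DMA32 for the stricter ZONE_DMA. */
	if (phys_mask < MOCK_BIT_MASK(32) && !(gfp & MOCK_GFP_DMA)) {
		gfp = (gfp & ~MOCK_GFP_DMA32) | MOCK_GFP_DMA;
		goto again;
	}
	return false;
}

int main(void)
{
	/* A device limited to 30-bit physical addresses: both fallbacks
	 * fire before the allocation succeeds (prints "ok"). */
	printf("%s\n", mock_alloc(MOCK_BIT_MASK(30), 0) ? "ok" : "failed");
	return 0;
}

Note that after this patch both retry checks compare phys_mask, the translated physical mask from the helper, rather than the raw dev->coherent_dma_mask.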