author	Nicolin Chen <nicoleotsuka@gmail.com>	2019-05-24 00:06:32 -0400
committer	Christoph Hellwig <hch@lst.de>	2019-06-03 10:00:07 -0400
commit	b1d2dc009dece4cd7e629419b52266ba51960a6b (patch)
tree	a31a473e64a10c54420e767337fd288753b7d6e3 /kernel/dma
parent	1b961423158caaae49d3900b7c9c37477bbfa9b3 (diff)
dma-contiguous: add dma_{alloc,free}_contiguous() helpers
Both dma_alloc_from_contiguous() and dma_release_from_contiguous() are
implemented very simply, yet they require callers to pass precomputed
parameters like count and align, and to reduce the allocation flags to a
boolean __GFP_NOWARN bit. So every function call duplicates similar work:

    unsigned long order = get_order(size);
    size_t count = size >> PAGE_SHIFT;

    page = dma_alloc_from_contiguous(dev, count, order, gfp & __GFP_NOWARN);
    [...]
    dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);

Additionally, as CMA can be used only in a context that permits sleeping,
most callers do a gfpflags_allow_blocking() check and a corresponding
fallback allocation of normal pages upon a false result:

    if (gfpflags_allow_blocking(flag))
        page = dma_alloc_from_contiguous();
    if (!page)
        page = alloc_pages();
    [...]
    if (!dma_release_from_contiguous(dev, page, count))
        __free_pages(page, get_order(size));

So this patch simplifies those call sites by abstracting these operations
into two new functions: dma_{alloc,free}_contiguous().

As some callers of dma_{alloc,release}_from_contiguous() might be
complicated, this patch wires the two new functions into kernel/dma/direct.c
only, as an initial step.

Suggested-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nicolin Chen <nicoleotsuka@gmail.com>
Tested-by: dann frazier <dann.frazier@canonical.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
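To make the before/after shape concrete, here is a sketch of a caller;
foo_alloc() and its surrounding driver context are hypothetical, invented
only for illustration, while the dma_*_contiguous() calls themselves are
the interfaces this patch introduces and replaces:

    /* Before: hypothetical caller open-coding the CMA-plus-fallback dance */
    static struct page *foo_alloc(struct device *dev, size_t size, gfp_t gfp)
    {
        struct page *page = NULL;

        if (gfpflags_allow_blocking(gfp))
            page = dma_alloc_from_contiguous(dev,
                            PAGE_ALIGN(size) >> PAGE_SHIFT,
                            get_order(size), gfp & __GFP_NOWARN);
        if (!page)
            page = alloc_pages_node(dev_to_node(dev), gfp,
                            get_order(size));
        return page;
    }

    /* After: the same hypothetical caller reduced to one helper call */
    static struct page *foo_alloc(struct device *dev, size_t size, gfp_t gfp)
    {
        return dma_alloc_contiguous(dev, size, gfp);
    }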
Diffstat (limited to 'kernel/dma')
-rw-r--r--	kernel/dma/contiguous.c	47
-rw-r--r--	kernel/dma/direct.c	24
2 files changed, 51 insertions(+), 20 deletions(-)
diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
index b2a87905846d..637b120d647b 100644
--- a/kernel/dma/contiguous.c
+++ b/kernel/dma/contiguous.c
@@ -214,6 +214,53 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
 	return cma_release(dev_get_cma_area(dev), pages, count);
 }
 
+/**
+ * dma_alloc_contiguous() - allocate contiguous pages
+ * @dev: Pointer to device for which the allocation is performed.
+ * @size: Requested allocation size.
+ * @gfp: Allocation flags.
+ *
+ * This function allocates contiguous memory buffer for specified device. It
+ * first tries to use device specific contiguous memory area if available or
+ * the default global one, then tries a fallback allocation of normal pages.
+ */
+struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
+{
+	int node = dev ? dev_to_node(dev) : NUMA_NO_NODE;
+	size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	size_t align = get_order(PAGE_ALIGN(size));
+	struct cma *cma = dev_get_cma_area(dev);
+	struct page *page = NULL;
+
+	/* CMA can be used only in the context which permits sleeping */
+	if (cma && gfpflags_allow_blocking(gfp)) {
+		align = min_t(size_t, align, CONFIG_CMA_ALIGNMENT);
+		page = cma_alloc(cma, count, align, gfp & __GFP_NOWARN);
+	}
+
+	/* Fallback allocation of normal pages */
+	if (!page)
+		page = alloc_pages_node(node, gfp, align);
+	return page;
+}
+
+/**
+ * dma_free_contiguous() - release allocated pages
+ * @dev: Pointer to device for which the pages were allocated.
+ * @page: Pointer to the allocated pages.
+ * @size: Size of allocated pages.
+ *
+ * This function releases memory allocated by dma_alloc_contiguous(). As the
+ * cma_release returns false when provided pages do not belong to contiguous
+ * area and true otherwise, this function then does a fallback __free_pages()
+ * upon a false-return.
+ */
+void dma_free_contiguous(struct device *dev, struct page *page, size_t size)
+{
+	if (!cma_release(dev_get_cma_area(dev), page, size >> PAGE_SHIFT))
+		__free_pages(page, get_order(size));
+}
+
 /*
  * Support for reserved memory regions defined in device tree
  */
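As a worked example of the count/align arithmetic in dma_alloc_contiguous()
above, assuming a 4 KiB PAGE_SIZE and the default CONFIG_CMA_ALIGNMENT of 8
(both are assumptions for illustration, not values stated in the patch):

    /* size = 20 KiB, PAGE_SIZE = 4 KiB (illustrative values only) */
    count = PAGE_ALIGN(size) >> PAGE_SHIFT;  /* 20480 >> 12 = 5 pages   */
    align = get_order(PAGE_ALIGN(size));     /* order 3, i.e. 8 pages   */
    align = min_t(size_t, align, 8);         /* CONFIG_CMA_ALIGNMENT cap */

Note the asymmetry this implies: cma_alloc() hands back exactly count (5)
pages at 2^align-page alignment, whereas the alloc_pages_node() fallback
allocates a whole order-align block, 8 pages for this 5-page request, and
dma_free_contiguous() correspondingly frees get_order(size) worth of pages.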
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 2c2772e9702a..0816c1e8b05a 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -96,8 +96,6 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
-	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	int page_order = get_order(size);
 	struct page *page = NULL;
 	u64 phys_mask;
 
@@ -109,20 +107,9 @@ struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 	gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
 			&phys_mask);
 again:
-	/* CMA can be used only in the context which permits sleeping */
-	if (gfpflags_allow_blocking(gfp)) {
-		page = dma_alloc_from_contiguous(dev, count, page_order,
-						 gfp & __GFP_NOWARN);
-		if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
-			dma_release_from_contiguous(dev, page, count);
-			page = NULL;
-		}
-	}
-	if (!page)
-		page = alloc_pages_node(dev_to_node(dev), gfp, page_order);
-
+	page = dma_alloc_contiguous(dev, size, gfp);
 	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
-		__free_pages(page, page_order);
+		dma_free_contiguous(dev, page, size);
 		page = NULL;
 
 		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
@@ -154,7 +141,7 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 	if (PageHighMem(page)) {
 		/*
 		 * Depending on the cma= arguments and per-arch setup
-		 * dma_alloc_from_contiguous could return highmem pages.
+		 * dma_alloc_contiguous could return highmem pages.
 		 * Without remapping there is no way to return them here,
 		 * so log an error and fail.
 		 */
@@ -176,10 +163,7 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 
 void __dma_direct_free_pages(struct device *dev, size_t size, struct page *page)
 {
-	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
-	if (!dma_release_from_contiguous(dev, page, count))
-		__free_pages(page, get_order(size));
+	dma_free_contiguous(dev, page, size);
 }
 
 void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
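The call site above keeps __dma_direct_alloc_pages()'s existing retry logic:
if the page returned by dma_alloc_contiguous() fails dma_coherent_ok(), it
is freed via dma_free_contiguous() and the allocation is retried with a more
restrictive GFP zone. A condensed sketch of that control flow, paraphrased
from the context lines above and simplified (the real zone conditions also
consult phys_mask), so treat it as an approximation rather than the verbatim
kernel code:

    again:
        page = dma_alloc_contiguous(dev, size, gfp);
        if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
            /* buffer not reachable by the device: free it and retry
             * from a lower zone (condition simplified for the sketch) */
            dma_free_contiguous(dev, page, size);
            page = NULL;
            if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
                !(gfp & (GFP_DMA32 | GFP_DMA))) {
                gfp |= GFP_DMA32;
                goto again;
            }
        }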