author    | Linus Torvalds <torvalds@linux-foundation.org> | 2014-11-04 00:01:04 -0500
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-11-04 00:01:04 -0500
commit    | f3ed88a6bce69942b44fa7927a92cd52011881d7 (patch)
tree      | 98e58b314bcb0097294a81ef31306579cea752e1 /mm
parent    | ce1928da8440eb5f73c20c5c88a449a4d59209d0 (diff)
parent    | dda02fd6278d9e995850b3c1dba484f17cbe4de4 (diff)
Merge branch 'fixes-for-v3.18' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping
Pull CMA and DMA-mapping fixes from Marek Szyprowski:
"This contains important fixes for recently introduced highmem support
for default contiguous memory region used for dma-mapping subsystem"
* 'fixes-for-v3.18' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping:
mm, cma: make parameters order consistent in func declaration and definition
mm: cma: Use %pa to print physical addresses
mm: cma: Ensure that reservations never cross the low/high mem boundary
mm: cma: Always consider a 0 base address reservation as dynamic
mm: cma: Don't crash on allocation if CMA area can't be activated
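
For orientation, the declaration interface these fixes touch is, in v3.18, cma_declare_contiguous(base, size, limit, alignment, order_per_bit, fixed, &cma). The sketch below is illustrative only: every example_* name is hypothetical, and only the cma_declare_contiguous() call itself comes from the kernel API. It shows a dynamic reservation with base == 0, the case the "0 base address" fix now always treats as dynamic even if the caller passes fixed as true.

/*
 * Illustrative sketch of an early CMA reservation (v3.18 interface).
 * Everything named example_* is made up for this example.
 */
#include <linux/cma.h>
#include <linux/init.h>
#include <linux/sizes.h>

static struct cma *example_area;

static int __init example_cma_reserve(void)
{
        /*
         * base == 0 and limit == 0 leave the placement to CMA; after the
         * "0 base address" fix such a request is treated as dynamic even
         * if fixed were passed as true.
         */
        return cma_declare_contiguous(0, SZ_64M, 0, 0, 0, false,
                                      &example_area);
}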
Diffstat (limited to 'mm')
-rw-r--r-- | mm/cma.c | 68
1 file changed, 44 insertions(+), 24 deletions(-)
@@ -124,6 +124,7 @@ static int __init cma_activate_area(struct cma *cma)
 
 err:
 	kfree(cma->bitmap);
+	cma->count = 0;
 	return -EINVAL;
 }
 
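
The added cma->count = 0 is what the allocation path relies on: cma_alloc() refuses to touch an empty area, so a region whose activation failed now yields NULL allocations instead of a crash on the freed bitmap. A condensed, paraphrased sketch of that guard (not the verbatim 3.18 code) looks like this:

/*
 * Paraphrased sketch, not verbatim mm/cma.c: with count forced to 0 in the
 * activation error path above, later allocation attempts bail out here
 * instead of walking the already-freed bitmap.
 */
struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
{
        struct page *page = NULL;

        if (!cma || !cma->count)
                return NULL;

        /* ... bitmap search and alloc_contig_range() elided ... */
        return page;
}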
@@ -217,9 +218,8 @@ int __init cma_declare_contiguous(phys_addr_t base,
 	phys_addr_t highmem_start = __pa(high_memory);
 	int ret = 0;
 
-	pr_debug("%s(size %lx, base %08lx, limit %08lx alignment %08lx)\n",
-		__func__, (unsigned long)size, (unsigned long)base,
-		(unsigned long)limit, (unsigned long)alignment);
+	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
+		__func__, &size, &base, &limit, &alignment);
 
 	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
 		pr_err("Not enough slots for CMA reserved regions!\n");
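
The switch to %pa above is not cosmetic: phys_addr_t may be wider than unsigned long (32-bit ARM with LPAE, for example), so the old casts silently truncated addresses above 4 GiB. %pa takes a pointer to the value and prints it at its native width. A minimal illustration, with a hypothetical helper name:

#include <linux/init.h>
#include <linux/printk.h>
#include <linux/types.h>

/* Hypothetical helper, only to show the %pa convention. */
static void __init example_show_base(phys_addr_t base)
{
        /* pass the address of the phys_addr_t; the full value is printed */
        pr_info("cma: base %pa\n", &base);
}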
@@ -244,52 +244,72 @@ int __init cma_declare_contiguous(phys_addr_t base,
 	size = ALIGN(size, alignment);
 	limit &= ~(alignment - 1);
 
+	if (!base)
+		fixed = false;
+
 	/* size should be aligned with order_per_bit */
 	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
 		return -EINVAL;
 
 	/*
-	 * adjust limit to avoid crossing low/high memory boundary for
-	 * automatically allocated regions
+	 * If allocating at a fixed base the request region must not cross the
+	 * low/high memory boundary.
 	 */
-	if (((limit == 0 || limit > memblock_end) &&
-	     (memblock_end - size < highmem_start &&
-	      memblock_end > highmem_start)) ||
-	    (!fixed && limit > highmem_start && limit - size < highmem_start)) {
-		limit = highmem_start;
-	}
-
-	if (fixed && base < highmem_start && base+size > highmem_start) {
+	if (fixed && base < highmem_start && base + size > highmem_start) {
 		ret = -EINVAL;
-		pr_err("Region at %08lx defined on low/high memory boundary (%08lx)\n",
-			(unsigned long)base, (unsigned long)highmem_start);
+		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
+			&base, &highmem_start);
 		goto err;
 	}
 
+	/*
+	 * If the limit is unspecified or above the memblock end, its effective
+	 * value will be the memblock end. Set it explicitly to simplify further
+	 * checks.
+	 */
+	if (limit == 0 || limit > memblock_end)
+		limit = memblock_end;
+
 	/* Reserve memory */
-	if (base && fixed) {
+	if (fixed) {
 		if (memblock_is_region_reserved(base, size) ||
 		    memblock_reserve(base, size) < 0) {
 			ret = -EBUSY;
 			goto err;
 		}
 	} else {
-		phys_addr_t addr = memblock_alloc_range(size, alignment, base,
-							limit);
+		phys_addr_t addr = 0;
+
+		/*
+		 * All pages in the reserved area must come from the same zone.
+		 * If the requested region crosses the low/high memory boundary,
+		 * try allocating from high memory first and fall back to low
+		 * memory in case of failure.
+		 */
+		if (base < highmem_start && limit > highmem_start) {
+			addr = memblock_alloc_range(size, alignment,
+						    highmem_start, limit);
+			limit = highmem_start;
+		}
+
 		if (!addr) {
-			ret = -ENOMEM;
-			goto err;
-		} else {
-			base = addr;
+			addr = memblock_alloc_range(size, alignment, base,
+						    limit);
+			if (!addr) {
+				ret = -ENOMEM;
+				goto err;
+			}
 		}
+
+		base = addr;
 	}
 
 	ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
 	if (ret)
		goto err;
 
-	pr_info("Reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
-		(unsigned long)base);
+	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
+		&base);
 	return 0;
 
 err:
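
To make the new dynamic path concrete, the standalone sketch below replays the window-selection arithmetic with hypothetical addresses (lowmem/highmem split at 0x30000000, RAM ending at 0x60000000). It mirrors only the checks above, not the real memblock calls:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;   /* stand-in for the kernel type */

int main(void)
{
        phys_addr_t base = 0;                           /* dynamic request */
        phys_addr_t limit = 0;                          /* "no limit" */
        const phys_addr_t memblock_end = 0x60000000;    /* hypothetical end of RAM */
        const phys_addr_t highmem_start = 0x30000000;   /* hypothetical lowmem/highmem split */

        /* an unspecified or oversized limit collapses to the memblock end */
        if (limit == 0 || limit > memblock_end)
                limit = memblock_end;

        /* a request straddling the boundary tries highmem first ... */
        if (base < highmem_start && limit > highmem_start) {
                printf("first try: [%#llx, %#llx)\n",
                       (unsigned long long)highmem_start,
                       (unsigned long long)limit);
                /* ... and clamps the fallback window to lowmem */
                limit = highmem_start;
        }

        printf("fallback : [%#llx, %#llx)\n",
               (unsigned long long)base, (unsigned long long)limit);
        return 0;
}

Both candidate windows lie entirely on one side of the boundary, which is exactly the property that keeps every page of the reservation within a single zone.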