diff options
Diffstat (limited to 'mm/util.c')
-rw-r--r-- | mm/util.c | 57 |
1 file changed, 57 insertions, 0 deletions
@@ -329,6 +329,63 @@ unsigned long vm_mmap(struct file *file, unsigned long addr, | |||
329 | } | 329 | } |
330 | EXPORT_SYMBOL(vm_mmap); | 330 | EXPORT_SYMBOL(vm_mmap); |
331 | 331 | ||
332 | /** | ||
333 | * kvmalloc_node - attempt to allocate physically contiguous memory, but upon | ||
334 | * failure, fall back to non-contiguous (vmalloc) allocation. | ||
335 | * @size: size of the request. | ||
336 | * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL. | ||
337 | * @node: numa node to allocate from | ||
338 | * | ||
339 | * Uses kmalloc to get the memory but if the allocation fails then falls back | ||
340 | * to the vmalloc allocator. Use kvfree for freeing the memory. | ||
341 | * | ||
342 | * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported. __GFP_REPEAT | ||
343 | * is supported only for large (>32kB) allocations, and it should be used only if | ||
344 | * kmalloc is preferable to the vmalloc fallback, due to visible performance drawbacks. | ||
345 | * | ||
346 | * Any use of gfp flags outside of GFP_KERNEL should be consulted with mm people. | ||
347 | */ | ||
348 | void *kvmalloc_node(size_t size, gfp_t flags, int node) | ||
349 | { | ||
350 | gfp_t kmalloc_flags = flags; | ||
351 | void *ret; | ||
352 | |||
353 | /* | ||
354 | * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables) | ||
355 | * so the given set of flags has to be compatible. | ||
356 | */ | ||
357 | WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL); | ||
358 | |||
359 | /* | ||
360 | * Make sure that larger requests are not too disruptive - no OOM | ||
361 | * killer and no allocation failure warnings as we have a fallback | ||
362 | */ | ||
363 | if (size > PAGE_SIZE) { | ||
364 | kmalloc_flags |= __GFP_NOWARN; | ||
365 | |||
366 | /* | ||
367 | * We have to override __GFP_REPEAT by __GFP_NORETRY for !costly | ||
368 | * requests because there is no other way to tell the allocator | ||
369 | * that we want to fail rather than retry endlessly. | ||
370 | */ | ||
371 | if (!(kmalloc_flags & __GFP_REPEAT) || | ||
372 | (size <= PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) | ||
373 | kmalloc_flags |= __GFP_NORETRY; | ||
374 | } | ||
375 | |||
376 | ret = kmalloc_node(size, kmalloc_flags, node); | ||
377 | |||
378 | /* | ||
379 | * It doesn't really make sense to fallback to vmalloc for sub page | ||
380 | * requests | ||
381 | */ | ||
382 | if (ret || size <= PAGE_SIZE) | ||
383 | return ret; | ||
384 | |||
385 | return __vmalloc_node_flags(size, node, flags); | ||
386 | } | ||
387 | EXPORT_SYMBOL(kvmalloc_node); | ||
388 | |||
332 | void kvfree(const void *addr) | 389 | void kvfree(const void *addr) |
333 | { | 390 | { |
334 | if (is_vmalloc_addr(addr)) | 391 | if (is_vmalloc_addr(addr)) |