 lib/bucket_locks.c | 5 +----
 mm/util.c          | 6 ++++--
 2 files changed, 5 insertions(+), 6 deletions(-)
diff --git a/lib/bucket_locks.c b/lib/bucket_locks.c
index 266a97c5708b..ade3ce6c4af6 100644
--- a/lib/bucket_locks.c
+++ b/lib/bucket_locks.c
@@ -30,10 +30,7 @@ int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
 	}
 
 	if (sizeof(spinlock_t) != 0) {
-		if (gfpflags_allow_blocking(gfp))
-			tlocks = kvmalloc(size * sizeof(spinlock_t), gfp);
-		else
-			tlocks = kmalloc_array(size, sizeof(spinlock_t), gfp);
+		tlocks = kvmalloc_array(size, sizeof(spinlock_t), gfp);
 		if (!tlocks)
 			return -ENOMEM;
 		for (i = 0; i < size; i++)
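
For reference, the simplification above relies on kvmalloc_array() now doing the right thing for non-blocking gfp masks: with the mm/util.c change below, kvmalloc() no longer attempts the vmalloc fallback when the flags are not GFP_KERNEL-compatible, so callers do not need to branch on gfpflags_allow_blocking() themselves. A minimal caller sketch along the lines of the simplified alloc_bucket_spinlocks() (the helper name is made up for illustration):

#include <linux/mm.h>		/* kvmalloc_array(), kvfree() */
#include <linux/spinlock.h>

/* Hypothetical helper: allocate and initialise a table of @size spinlocks.
 * A single kvmalloc_array() call suffices; when @gfp does not allow
 * blocking, kvmalloc() now degrades to a plain kmalloc() internally, so
 * the old gfpflags_allow_blocking() branch is unnecessary.
 */
static spinlock_t *alloc_lock_table(unsigned int size, gfp_t gfp)
{
	spinlock_t *tlocks;
	unsigned int i;

	tlocks = kvmalloc_array(size, sizeof(spinlock_t), gfp);
	if (!tlocks)
		return NULL;

	for (i = 0; i < size; i++)
		spin_lock_init(&tlocks[i]);

	return tlocks;		/* release with kvfree() */
}
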
diff --git a/mm/util.c b/mm/util.c
--- a/mm/util.c
+++ b/mm/util.c
@@ -391,7 +391,8 @@ EXPORT_SYMBOL(vm_mmap);
  * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
  * preferable to the vmalloc fallback, due to visible performance drawbacks.
  *
- * Any use of gfp flags outside of GFP_KERNEL should be consulted with mm people.
+ * Please note that any use of gfp flags outside of GFP_KERNEL is careful to not
+ * fall back to vmalloc.
  */
 void *kvmalloc_node(size_t size, gfp_t flags, int node)
 {
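
The retained part of the comment can be read as a usage hint: a caller that prefers physically contiguous memory can pass __GFP_RETRY_MAYFAIL so the initial kmalloc attempt is retried harder before kvmalloc() resorts to the vmalloc fallback, at the cost of higher allocation latency under memory pressure. A hedged sketch (the function name is illustrative only):

#include <linux/mm.h>		/* kvmalloc(), kvfree() */

/* Illustrative only: prefer a physically contiguous buffer.  With
 * __GFP_RETRY_MAYFAIL the kmalloc attempt is not given __GFP_NORETRY,
 * so it tries harder before the vmalloc fallback is used.
 */
static void *alloc_contig_preferred(size_t len)
{
	return kvmalloc(len, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
}
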
@@ -402,7 +403,8 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
 	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
 	 * so the given set of flags has to be compatible.
 	 */
-	WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);
+	if ((flags & GFP_KERNEL) != GFP_KERNEL)
+		return kmalloc_node(size, flags, node);
 
 	/*
 	 * We want to attempt a large physically contiguous block first because
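
To make the behavioural change concrete, here is a sketch (not part of the patch, names made up) of what a caller now gets for different gfp masks: with GFP_KERNEL-compatible flags the vmalloc fallback is still available, while with something like GFP_ATOMIC kvmalloc() quietly behaves as kmalloc() instead of tripping the old WARN_ON_ONCE().

#include <linux/mm.h>		/* kvmalloc(), kvfree() */

/* Illustrative sketch of the new semantics:
 *  - GFP_KERNEL: kmalloc first, vmalloc fallback for large or
 *                fragmented requests, as before.
 *  - GFP_ATOMIC: not GFP_KERNEL-compatible, so kvmalloc() now simply
 *                returns the result of a plain kmalloc() - no warning,
 *                no vmalloc fallback, usable in atomic context (the
 *                allocation may of course still fail).
 */
static void *grab_buffer(size_t len, bool can_sleep)
{
	return kvmalloc(len, can_sleep ? GFP_KERNEL : GFP_ATOMIC);
}

Either way the buffer is released with kvfree(), which handles both kmalloc- and vmalloc-backed memory.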