Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	7
1 files changed, 4 insertions, 3 deletions
diff --git a/mm/slab.c b/mm/slab.c
index 909975f6e090..c6100628a6ef 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3281,7 +3281,7 @@ retry:
 					flags | GFP_THISNODE, nid);
 	}
 
-	if (!obj) {
+	if (!obj && !(flags & __GFP_NO_GROW)) {
 		/*
 		 * This allocation will be performed within the constraints
 		 * of the current cpuset / memory policy requirements.
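This hunk makes the fallback path respect __GFP_NO_GROW: when no object could be taken from any allowed node, the cache is only grown with fresh pages if the caller did not pass that flag. A minimal userspace sketch of the same control-flow shape, with every name except __GFP_NO_GROW invented for illustration:

/* Sketch only: a fallback allocator that refuses to grow its cache when
 * the caller passes a "no grow" flag, mirroring the guard in this hunk.
 * All identifiers except __GFP_NO_GROW are hypothetical. */
#include <stddef.h>
#include <stdlib.h>

#define __GFP_NO_GROW	0x01u	/* caller forbids growing the cache */

struct toy_cache { size_t obj_size; };

/* Stand-in for the per-node search above: empty freelists return NULL. */
static void *toy_alloc_from_nodes(struct toy_cache *c, unsigned int flags)
{
	(void)c; (void)flags;
	return NULL;
}

void *toy_fallback_alloc(struct toy_cache *c, unsigned int flags)
{
	void *obj = toy_alloc_from_nodes(c, flags);

	/* Same shape as the new condition: only fall back to fresh pages
	 * if the caller did not forbid growing the cache. */
	if (!obj && !(flags & __GFP_NO_GROW))
		obj = malloc(c->obj_size);	/* stands in for kmem_getpages() */

	return obj;
}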
@@ -3310,7 +3310,7 @@ retry:
 					 */
 					goto retry;
 			} else {
-				kmem_freepages(cache, obj);
+				/* cache_grow already freed obj */
 				obj = NULL;
 			}
 		}
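The replacement comment records an ownership rule: when cache_grow() fails here it has already freed the pages that were handed to it, so calling kmem_freepages() again from this branch would free them twice. A minimal userspace sketch of that convention, assuming nothing about the real kernel helpers beyond what the hunk states; all names are hypothetical:

/* Sketch only: a grow routine that takes ownership of the caller's pages
 * and frees them itself on failure, so the caller must not free again. */
#include <stdbool.h>
#include <stdlib.h>

struct toy_cache { int unused; };

/* Returns true on success; on failure it has already freed 'pages'. */
static bool toy_cache_grow(struct toy_cache *c, void *pages)
{
	bool setup_ok = false;	/* pretend the slab bookkeeping setup failed */

	(void)c;
	if (!setup_ok) {
		free(pages);	/* grow owns the pages: clean up on failure */
		return false;
	}
	return true;
}

void *toy_add_pages(struct toy_cache *c, void *pages)
{
	if (toy_cache_grow(c, pages))
		return pages;	/* corresponds to the retry path above */
	/* toy_cache_grow already freed 'pages': freeing it here again
	 * would be the double free the new comment warns about. */
	return NULL;
}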
@@ -3553,7 +3553,7 @@ EXPORT_SYMBOL(kmem_cache_zalloc);
  *
  * Currently only used for dentry validation.
  */
-int fastcall kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr)
+int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr)
 {
 	unsigned long addr = (unsigned long)ptr;
 	unsigned long min_addr = PAGE_OFFSET;
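The context lines show the start of kmem_ptr_validate(), which the comment above it says is currently only used for dentry validation: it checks whether a pointer could plausibly be an object from the given cache, starting with a lower bound of PAGE_OFFSET. A rough userspace sketch of that style of check, with the constants and the page-level test reduced to placeholders (everything here is illustrative, not the kernel implementation):

/* Sketch only: a "could this pointer belong to my allocator?" check in the
 * spirit of kmem_ptr_validate().  The real function also verifies that the
 * backing page is a slab page of this cache; that part is omitted here. */
#include <stdbool.h>
#include <stdint.h>

#define TOY_MIN_ADDR	0x1000ul	/* stands in for PAGE_OFFSET */

struct toy_cache {
	unsigned long align_mask;	/* object alignment - 1 */
};

bool toy_ptr_validate(const struct toy_cache *c, const void *ptr)
{
	uintptr_t addr = (uintptr_t)ptr;

	if (addr < TOY_MIN_ADDR)	/* below the lowest valid address */
		return false;
	if (addr & c->align_mask)	/* not aligned like a cache object */
		return false;
	/* A real check would additionally map addr to its page and confirm
	 * the page belongs to this cache before trusting the pointer. */
	return true;
}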
@@ -3587,6 +3587,7 @@ out:
  * @cachep: The cache to allocate from.
  * @flags: See kmalloc().
  * @nodeid: node number of the target node.
+ * @caller: return address of caller, used for debug information
  *
  * Identical to kmem_cache_alloc but it will allocate memory on the given
  * node, which can improve the performance for cpu bound structures.
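The added kernel-doc line documents a @caller argument whose value is the caller's return address, kept for debug information. In C the usual way to obtain that address is the compiler built-in __builtin_return_address(0); the sketch below shows the pattern with hypothetical function names (it is not the kernel's signature):

/* Sketch only: threading the caller's return address through an allocation
 * helper for debug bookkeeping.  Only __builtin_return_address() is real;
 * the functions and the printf stand-in are invented for the example. */
#include <stdio.h>
#include <stdlib.h>

static void *toy_alloc_node(size_t size, int nodeid, void *caller)
{
	/* A debug build could record 'caller' next to the returned object. */
	printf("allocating %zu bytes on node %d for caller %p\n",
	       size, nodeid, caller);
	return malloc(size);
}

void *toy_kmalloc_node(size_t size, int nodeid)
{
	/* GCC/Clang built-in: the address this function will return to,
	 * i.e. who called us -- the "@caller" being documented above. */
	return toy_alloc_node(size, nodeid, __builtin_return_address(0));
}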