path: root/mm/slab.c
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	200
1 file changed, 110 insertions(+), 90 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index aea1cd25314b..b595323e24a2 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3189,35 +3189,6 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 	return objp;
 }
 
-static __always_inline void *__cache_alloc(struct kmem_cache *cachep,
-						gfp_t flags, void *caller)
-{
-	unsigned long save_flags;
-	void *objp = NULL;
-
-	cache_alloc_debugcheck_before(cachep, flags);
-
-	local_irq_save(save_flags);
-
-	if (unlikely(NUMA_BUILD &&
-			current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY)))
-		objp = alternate_node_alloc(cachep, flags);
-
-	if (!objp)
-		objp = ____cache_alloc(cachep, flags);
-	/*
-	 * We may just have run out of memory on the local node.
-	 * ____cache_alloc_node() knows how to locate memory on other nodes
-	 */
-	if (NUMA_BUILD && !objp)
-		objp = ____cache_alloc_node(cachep, flags, numa_node_id());
-	local_irq_restore(save_flags);
-	objp = cache_alloc_debugcheck_after(cachep, flags, objp,
-						caller);
-	prefetchw(objp);
-	return objp;
-}
-
 #ifdef CONFIG_NUMA
 /*
  * Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY.
@@ -3249,14 +3220,20 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
  * allocator to do its reclaim / fallback magic. We then insert the
  * slab into the proper nodelist and then allocate from it.
  */
-void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
+static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
 {
-	struct zonelist *zonelist = &NODE_DATA(slab_node(current->mempolicy))
-			->node_zonelists[gfp_zone(flags)];
+	struct zonelist *zonelist;
+	gfp_t local_flags;
 	struct zone **z;
 	void *obj = NULL;
 	int nid;
-	gfp_t local_flags = (flags & GFP_LEVEL_MASK);
+
+	if (flags & __GFP_THISNODE)
+		return NULL;
+
+	zonelist = &NODE_DATA(slab_node(current->mempolicy))
+			->node_zonelists[gfp_zone(flags)];
+	local_flags = (flags & GFP_LEVEL_MASK);
 
 retry:
 	/*
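
Aside: the hunk above makes fallback_alloc() static and bails out immediately when the caller passed __GFP_THISNODE, so only unpinned requests go hunting on other nodes. As an illustrative-only sketch of that pattern (the helper name and explicit node array are assumptions for this example, not kernel API; the real fallback_alloc() walks the mempolicy zonelist and may grow the cache):

#include <linux/slab.h>

/*
 * Illustrative-only sketch, not the in-kernel implementation: if the
 * caller pinned the allocation with __GFP_THISNODE there is nothing to
 * fall back to; otherwise try each candidate node in preference order,
 * pinning every individual attempt to that node.
 */
static void *try_nodes_in_order(struct kmem_cache *cache, gfp_t flags,
				const int *nodes, int nr_nodes)
{
	void *obj = NULL;
	int i;

	if (flags & __GFP_THISNODE)	/* caller pinned the node */
		return NULL;

	for (i = 0; i < nr_nodes && !obj; i++)
		obj = kmem_cache_alloc_node(cache, flags | __GFP_THISNODE,
					    nodes[i]);
	return obj;
}
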
@@ -3366,16 +3343,110 @@ must_grow:
 	if (x)
 		goto retry;
 
-	if (!(flags & __GFP_THISNODE))
-		/* Unable to grow the cache. Fall back to other nodes. */
-		return fallback_alloc(cachep, flags);
-
-	return NULL;
+	return fallback_alloc(cachep, flags);
 
 done:
 	return obj;
 }
-#endif
+
+/**
+ * kmem_cache_alloc_node - Allocate an object on the specified node
+ * @cachep: The cache to allocate from.
+ * @flags: See kmalloc().
+ * @nodeid: node number of the target node.
+ * @caller: return address of caller, used for debug information
+ *
+ * Identical to kmem_cache_alloc but it will allocate memory on the given
+ * node, which can improve the performance for cpu bound structures.
+ *
+ * Fallback to other node is possible if __GFP_THISNODE is not set.
+ */
+static __always_inline void *
+__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
+		   void *caller)
+{
+	unsigned long save_flags;
+	void *ptr;
+
+	cache_alloc_debugcheck_before(cachep, flags);
+	local_irq_save(save_flags);
+
+	if (unlikely(nodeid == -1))
+		nodeid = numa_node_id();
+
+	if (unlikely(!cachep->nodelists[nodeid])) {
+		/* Node not bootstrapped yet */
+		ptr = fallback_alloc(cachep, flags);
+		goto out;
+	}
+
+	if (nodeid == numa_node_id()) {
+		/*
+		 * Use the locally cached objects if possible.
+		 * However ____cache_alloc does not allow fallback
+		 * to other nodes. It may fail while we still have
+		 * objects on other nodes available.
+		 */
+		ptr = ____cache_alloc(cachep, flags);
+		if (ptr)
+			goto out;
+	}
+	/* ___cache_alloc_node can fall back to other nodes */
+	ptr = ____cache_alloc_node(cachep, flags, nodeid);
+  out:
+	local_irq_restore(save_flags);
+	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
+
+	return ptr;
+}
+
+static __always_inline void *
+__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
+{
+	void *objp;
+
+	if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
+		objp = alternate_node_alloc(cache, flags);
+		if (objp)
+			goto out;
+	}
+	objp = ____cache_alloc(cache, flags);
+
+	/*
+	 * We may just have run out of memory on the local node.
+	 * ____cache_alloc_node() knows how to locate memory on other nodes
+	 */
+	if (!objp)
+		objp = ____cache_alloc_node(cache, flags, numa_node_id());
+
+  out:
+	return objp;
+}
+#else
+
+static __always_inline void *
+__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
+{
+	return ____cache_alloc(cachep, flags);
+}
+
+#endif /* CONFIG_NUMA */
+
+static __always_inline void *
+__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
+{
+	unsigned long save_flags;
+	void *objp;
+
+	cache_alloc_debugcheck_before(cachep, flags);
+	local_irq_save(save_flags);
+	objp = __do_cache_alloc(cachep, flags);
+	local_irq_restore(save_flags);
+	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
+	prefetchw(objp);
+
+	return objp;
+}
 
 /*
  * Caller needs to acquire correct kmem_list's list_lock
@@ -3574,57 +3645,6 @@ out:
 }
 
 #ifdef CONFIG_NUMA
-/**
- * kmem_cache_alloc_node - Allocate an object on the specified node
- * @cachep: The cache to allocate from.
- * @flags: See kmalloc().
- * @nodeid: node number of the target node.
- * @caller: return address of caller, used for debug information
- *
- * Identical to kmem_cache_alloc but it will allocate memory on the given
- * node, which can improve the performance for cpu bound structures.
- *
- * Fallback to other node is possible if __GFP_THISNODE is not set.
- */
-static __always_inline void *
-__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
-		int nodeid, void *caller)
-{
-	unsigned long save_flags;
-	void *ptr = NULL;
-
-	cache_alloc_debugcheck_before(cachep, flags);
-	local_irq_save(save_flags);
-
-	if (unlikely(nodeid == -1))
-		nodeid = numa_node_id();
-
-	if (likely(cachep->nodelists[nodeid])) {
-		if (nodeid == numa_node_id()) {
-			/*
-			 * Use the locally cached objects if possible.
-			 * However ____cache_alloc does not allow fallback
-			 * to other nodes. It may fail while we still have
-			 * objects on other nodes available.
-			 */
-			ptr = ____cache_alloc(cachep, flags);
-		}
-		if (!ptr) {
-			/* ___cache_alloc_node can fall back to other nodes */
-			ptr = ____cache_alloc_node(cachep, flags, nodeid);
-		}
-	} else {
-		/* Node not bootstrapped yet */
-		if (!(flags & __GFP_THISNODE))
-			ptr = fallback_alloc(cachep, flags);
-	}
-
-	local_irq_restore(save_flags);
-	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
-
-	return ptr;
-}
-
 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
 	return __cache_alloc_node(cachep, flags, nodeid,
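
Taken together with the kernel-doc added for kmem_cache_alloc_node() earlier in this diff, the caller-visible contract is: the allocation prefers @nodeid, and falls back to other nodes only when __GFP_THISNODE is not set. A minimal, hypothetical caller sketch (grab_buf() and its cache argument are assumptions for illustration, not part of this patch):

#include <linux/slab.h>

/*
 * Hypothetical caller sketch: first insist on node-local memory, then
 * relax the constraint and let the allocator fall back to other nodes.
 */
static void *grab_buf(struct kmem_cache *cache, int nid)
{
	void *obj;

	/* must come from nid: __GFP_THISNODE forbids cross-node fallback */
	obj = kmem_cache_alloc_node(cache, GFP_KERNEL | __GFP_THISNODE, nid);
	if (obj)
		return obj;

	/* no memory on nid: any node will do now */
	return kmem_cache_alloc_node(cache, GFP_KERNEL, nid);
}

Passing nodeid == -1 simply means the local node: the new __cache_alloc_node() above maps it to numa_node_id() before looking at the nodelists.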