Diffstat (limited to 'mm')
-rw-r--r--	mm/slab.c	40
1 file changed, 28 insertions(+), 12 deletions(-)
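The hunk below restructures __cache_alloc_node() so that a node-specific allocation can fall back to other nodes unless the caller passes __GFP_THISNODE. A minimal caller-side sketch of the resulting contract (the helper name is hypothetical and not part of this patch; kmem_cache_alloc_node() is the exported wrapper around the function patched below):

	#include <linux/slab.h>

	/* Hypothetical helper: with __GFP_THISNODE set, the allocation
	 * fails instead of silently returning memory from another node. */
	static void *alloc_strictly_on_node(struct kmem_cache *cachep, int nid)
	{
		return kmem_cache_alloc_node(cachep,
					     GFP_KERNEL | __GFP_THISNODE, nid);
	}

Without __GFP_THISNODE, the same call may now succeed with memory from a different node when the target node is exhausted or not yet bootstrapped.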
@@ -3459,29 +3459,45 @@ out:
  * @flags: See kmalloc().
  * @nodeid: node number of the target node.
  *
- * Identical to kmem_cache_alloc, except that this function is slow
- * and can sleep. And it will allocate memory on the given node, which
- * can improve the performance for cpu bound structures.
- * New and improved: it will now make sure that the object gets
- * put on the correct node list so that there is no false sharing.
+ * Identical to kmem_cache_alloc but it will allocate memory on the given
+ * node, which can improve the performance for cpu bound structures.
+ *
+ * Fallback to other node is possible if __GFP_THISNODE is not set.
  */
 static __always_inline void *
 __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
 		int nodeid, void *caller)
 {
 	unsigned long save_flags;
-	void *ptr;
+	void *ptr = NULL;
 
 	cache_alloc_debugcheck_before(cachep, flags);
 	local_irq_save(save_flags);
 
-	if (nodeid == -1 || nodeid == numa_node_id() ||
-			!cachep->nodelists[nodeid])
-		ptr = ____cache_alloc(cachep, flags);
-	else
-		ptr = ____cache_alloc_node(cachep, flags, nodeid);
-	local_irq_restore(save_flags);
+	if (unlikely(nodeid == -1))
+		nodeid = numa_node_id();
 
+	if (likely(cachep->nodelists[nodeid])) {
+		if (nodeid == numa_node_id()) {
+			/*
+			 * Use the locally cached objects if possible.
+			 * However ____cache_alloc does not allow fallback
+			 * to other nodes. It may fail while we still have
+			 * objects on other nodes available.
+			 */
+			ptr = ____cache_alloc(cachep, flags);
+		}
+		if (!ptr) {
+			/* ___cache_alloc_node can fall back to other nodes */
+			ptr = ____cache_alloc_node(cachep, flags, nodeid);
+		}
+	} else {
+		/* Node not bootstrapped yet */
+		if (!(flags & __GFP_THISNODE))
+			ptr = fallback_alloc(cachep, flags);
+	}
+
+	local_irq_restore(save_flags);
 	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
 
 	return ptr;
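The new decision ladder is: resolve nodeid == -1 to the local node; if the target node's lists exist, try the per-cpu fast path first (local objects only, no fallback), then the per-node slow path, which may itself fall back; only when the node is not bootstrapped does __GFP_THISNODE decide whether fallback_alloc() may be used. A standalone model of that ladder, with hypothetical userspace stubs standing in for the kernel internals above:

	#include <stdbool.h>
	#include <stdio.h>

	#define GFP_THISNODE 0x1u	/* stand-in for __GFP_THISNODE */

	static int local_node = 0;
	static bool node_bootstrapped[2] = { true, false };

	/* Per-cpu fast path: local objects only, no cross-node fallback. */
	static void *cache_alloc_local(void) { return NULL; /* may fail */ }
	/* Per-node slow path: may itself fall back to other nodes. */
	static void *cache_alloc_on_node(int nid) { (void)nid; return "node-obj"; }
	/* Last resort when the target node has no lists set up yet. */
	static void *fallback_alloc_any(void) { return "fallback-obj"; }

	static void *model_cache_alloc_node(unsigned int flags, int nodeid)
	{
		void *ptr = NULL;

		if (nodeid == -1)
			nodeid = local_node;

		if (node_bootstrapped[nodeid]) {
			if (nodeid == local_node)
				ptr = cache_alloc_local();
			if (!ptr)
				ptr = cache_alloc_on_node(nodeid);
		} else if (!(flags & GFP_THISNODE)) {
			/* Node not bootstrapped yet */
			ptr = fallback_alloc_any();
		}
		return ptr;
	}

	int main(void)
	{
		void *a = model_cache_alloc_node(0, 1);            /* node 1 not up: falls back */
		void *b = model_cache_alloc_node(GFP_THISNODE, 1); /* node 1 not up: fails */

		printf("%s\n", a ? (char *)a : "NULL");	/* prints: fallback-obj */
		printf("%s\n", b ? (char *)b : "NULL");	/* prints: NULL */
		return 0;
	}

Note the behavioral fix this structure delivers: the old code took the fast path for any nodeid that matched numa_node_id() or had no nodelist, and could fail even though other nodes still had objects; the rewrite retries via ____cache_alloc_node() whenever the fast path returns NULL.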