about · summary · refs · log · tree · commit · diff · stats
diff options
context:
space:
mode:
author    Christoph Lameter <clameter@sgi.com>  2006-12-06 23:33:24 -0500
committer Linus Torvalds <torvalds@woody.osdl.org>  2006-12-07 11:39:25 -0500
commit    5bcd234d881d83ac0259c6d42d98f134e31c60a8 (patch)
tree      40d58218ce224200336c449ba035bcb6ec119d89
parent    1b1cec4bbc59feac89670d5d6d222a02545bac94 (diff)
[PATCH] slab: fix two issues in kmalloc_node / __cache_alloc_node
This addresses two issues:

1. Kmalloc_node() may intermittently return NULL if we are allocating from
   the current node and are unable to obtain memory for the current node
   from the page allocator.  This is because we call ___cache_alloc() if
   nodeid == numa_node_id() and ____cache_alloc is not able to fallback to
   other nodes.

   This was introduced in the 2.6.19 development cycle.  <= 2.6.18 in that
   case does not do a restricted allocation and blindly trusts the page
   allocator to have given us memory from the indicated node.  It inserts
   the page regardless of the node it came from into the queues for the
   current node.

2. If kmalloc_node() is used on a node that has not been bootstrapped yet
   then we may try to pass an invalid node number to
   ____cache_alloc_node() triggering a BUG().

   Change the function to call fallback_alloc() instead.  Only call
   fallback_alloc() if we are allowed to fallback at all.  The need to
   handle a node not bootstrapped yet also first surfaced in the 2.6.19
   cycle.

Update the comments since they were still describing the old kmalloc_node
from 2.6.12.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--  mm/slab.c  |  40 ++++++++++++++++++++++++++++------------
1 file changed, 28 insertions(+), 12 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index bb831ba63e1e..6da554fd3f6a 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3459,29 +3459,45 @@ out:
  * @flags: See kmalloc().
  * @nodeid: node number of the target node.
  *
- * Identical to kmem_cache_alloc, except that this function is slow
- * and can sleep. And it will allocate memory on the given node, which
- * can improve the performance for cpu bound structures.
- * New and improved: it will now make sure that the object gets
- * put on the correct node list so that there is no false sharing.
+ * Identical to kmem_cache_alloc but it will allocate memory on the given
+ * node, which can improve the performance for cpu bound structures.
+ *
+ * Fallback to other node is possible if __GFP_THISNODE is not set.
  */
 static __always_inline void *
 __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
 		int nodeid, void *caller)
 {
 	unsigned long save_flags;
-	void *ptr;
+	void *ptr = NULL;
 
 	cache_alloc_debugcheck_before(cachep, flags);
 	local_irq_save(save_flags);
 
-	if (nodeid == -1 || nodeid == numa_node_id() ||
-			!cachep->nodelists[nodeid])
-		ptr = ____cache_alloc(cachep, flags);
-	else
-		ptr = ____cache_alloc_node(cachep, flags, nodeid);
-	local_irq_restore(save_flags);
+	if (unlikely(nodeid == -1))
+		nodeid = numa_node_id();
 
+	if (likely(cachep->nodelists[nodeid])) {
+		if (nodeid == numa_node_id()) {
+			/*
+			 * Use the locally cached objects if possible.
+			 * However ____cache_alloc does not allow fallback
+			 * to other nodes. It may fail while we still have
+			 * objects on other nodes available.
+			 */
+			ptr = ____cache_alloc(cachep, flags);
+		}
+		if (!ptr) {
+			/* ___cache_alloc_node can fall back to other nodes */
+			ptr = ____cache_alloc_node(cachep, flags, nodeid);
+		}
+	} else {
+		/* Node not bootstrapped yet */
+		if (!(flags & __GFP_THISNODE))
+			ptr = fallback_alloc(cachep, flags);
+	}
+
+	local_irq_restore(save_flags);
 	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
 
 	return ptr;