about summary refs log tree commit diff stats
path: root/mm/slab.c
diff options
context:
space:
mode:
authorChristoph Lameter <clameter@sgi.com>2006-09-27 04:50:03 -0400
committerLinus Torvalds <torvalds@g5.osdl.org>2006-09-27 11:26:12 -0400
commitde3083ec3e6bfb1ab60bc8a410f37702529f953c (patch)
tree4093fcf3991e9af4be57c7c942dbea42011e4eb7 /mm/slab.c
parent0fd0e6b05aa096622f151cac2f81f2e6844fb1bb (diff)
[PATCH] slab: fix kmalloc_node applying memory policies if nodeid == numa_node_id()
kmalloc_node() falls back to ___cache_alloc() under certain conditions and at that point memory policies may be applied redirecting the allocation away from the current node. Therefore kmalloc_node(...,numa_node_id()) or kmalloc_node(...,-1) may not return memory from the local node. Fix this by doing the policy check in __cache_alloc() instead of ____cache_alloc(). This version here is a cleanup of Kiran's patch. - Tested on ia64. - Extra material removed. - Consolidate the exit path if alternate_node_alloc() returned an object. [akpm@osdl.org: warning fix] Signed-off-by: Alok N Kataria <alok.kataria@calsoftinc.com> Signed-off-by: Ravikiran Thirumalai <kiran@scalex86.org> Signed-off-by: Shai Fultheim <shai@scalex86.org> Signed-off-by: Christoph Lameter <clameter@sgi.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c  19
1 file changed, 9 insertions(+), 10 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index c52ebf9c4462..69e11c45002f 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3028,14 +3028,6 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3028 void *objp; 3028 void *objp;
3029 struct array_cache *ac; 3029 struct array_cache *ac;
3030 3030
3031#ifdef CONFIG_NUMA
3032 if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
3033 objp = alternate_node_alloc(cachep, flags);
3034 if (objp != NULL)
3035 return objp;
3036 }
3037#endif
3038
3039 check_irq_off(); 3031 check_irq_off();
3040 ac = cpu_cache_get(cachep); 3032 ac = cpu_cache_get(cachep);
3041 if (likely(ac->avail)) { 3033 if (likely(ac->avail)) {
@@ -3053,12 +3045,19 @@ static __always_inline void *__cache_alloc(struct kmem_cache *cachep,
3053 gfp_t flags, void *caller) 3045 gfp_t flags, void *caller)
3054{ 3046{
3055 unsigned long save_flags; 3047 unsigned long save_flags;
3056 void *objp; 3048 void *objp = NULL;
3057 3049
3058 cache_alloc_debugcheck_before(cachep, flags); 3050 cache_alloc_debugcheck_before(cachep, flags);
3059 3051
3060 local_irq_save(save_flags); 3052 local_irq_save(save_flags);
3061 objp = ____cache_alloc(cachep, flags); 3053
3054#ifdef CONFIG_NUMA
3055 if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY)))
3056 objp = alternate_node_alloc(cachep, flags);
3057#endif
3058
3059 if (!objp)
3060 objp = ____cache_alloc(cachep, flags);
3062 local_irq_restore(save_flags); 3061 local_irq_restore(save_flags);
3063 objp = cache_alloc_debugcheck_after(cachep, flags, objp, 3062 objp = cache_alloc_debugcheck_after(cachep, flags, objp,
3064 caller); 3063 caller);