aboutsummaryrefslogtreecommitdiffstats
path: root/mm/slab.c
diff options
context:
space:
mode:
authorPaul Jackson <pj@sgi.com>2006-03-24 06:16:08 -0500
committerLinus Torvalds <torvalds@g5.osdl.org>2006-03-24 10:33:23 -0500
commitc61afb181c649754ea221f104e268cbacfc993e3 (patch)
tree870917b3f9175cf1663a2620d989856913cfb5f8 /mm/slab.c
parent101a50019ae5e370d73984ee05d56dd3b08f330a (diff)
[PATCH] cpuset memory spread slab cache optimizations
The hooks in the slab cache allocator code path for support of NUMA mempolicies and cpuset memory spreading are in an important code path. Many systems will use neither feature. This patch optimizes those hooks down to a single check of some bits in the current task's task_struct flags. For non NUMA systems, this hook and related code is already ifdef'd out. The optimization is done by using another task flag, set if the task is using a non-default NUMA mempolicy. Taking this flag bit along with the PF_SPREAD_PAGE and PF_SPREAD_SLAB flag bits added earlier in this 'cpuset memory spreading' patch set, one can check for the combination of any of these special case memory placement mechanisms with a single test of the current task's task_struct flags. This patch also tightens up the code, to save a few bytes of kernel text space, and moves some of it out of line. Due to the nested inlines called from multiple places, we were ending up with three copies of this code, which once we get off the main code path (for local node allocation) seems a bit wasteful of instruction memory. Signed-off-by: Paul Jackson <pj@sgi.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm/slab.c')
-rw-r--r-- mm/slab.c | 41
1 files changed, 28 insertions, 13 deletions
diff --git a/mm/slab.c b/mm/slab.c
index de516658d3d..f80b52388a1 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -899,6 +899,7 @@ static struct array_cache *alloc_arraycache(int node, int entries,
899 899
900#ifdef CONFIG_NUMA 900#ifdef CONFIG_NUMA
901static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int); 901static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int);
902static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
902 903
903static struct array_cache **alloc_alien_cache(int node, int limit) 904static struct array_cache **alloc_alien_cache(int node, int limit)
904{ 905{
@@ -2808,19 +2809,11 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
2808 struct array_cache *ac; 2809 struct array_cache *ac;
2809 2810
2810#ifdef CONFIG_NUMA 2811#ifdef CONFIG_NUMA
2811 if (unlikely(current->mempolicy && !in_interrupt())) { 2812 if (unlikely(current->flags & (PF_SPREAD_PAGE | PF_SPREAD_SLAB |
2812 int nid = slab_node(current->mempolicy); 2813 PF_MEMPOLICY))) {
2813 2814 objp = alternate_node_alloc(cachep, flags);
2814 if (nid != numa_node_id()) 2815 if (objp != NULL)
2815 return __cache_alloc_node(cachep, flags, nid); 2816 return objp;
2816 }
2817 if (unlikely(cpuset_do_slab_mem_spread() &&
2818 (cachep->flags & SLAB_MEM_SPREAD) &&
2819 !in_interrupt())) {
2820 int nid = cpuset_mem_spread_node();
2821
2822 if (nid != numa_node_id())
2823 return __cache_alloc_node(cachep, flags, nid);
2824 } 2817 }
2825#endif 2818#endif
2826 2819
@@ -2856,6 +2849,28 @@ static __always_inline void *__cache_alloc(struct kmem_cache *cachep,
2856 2849
2857#ifdef CONFIG_NUMA 2850#ifdef CONFIG_NUMA
2858/* 2851/*
2852 * Try allocating on another node if PF_SPREAD_PAGE|PF_SPREAD_SLAB|PF_MEMPOLICY.
2853 *
2854 * If we are in_interrupt, then process context, including cpusets and
2855 * mempolicy, may not apply and should not be used for allocation policy.
2856 */
2857static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
2858{
2859 int nid_alloc, nid_here;
2860
2861 if (in_interrupt())
2862 return NULL;
2863 nid_alloc = nid_here = numa_node_id();
2864 if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
2865 nid_alloc = cpuset_mem_spread_node();
2866 else if (current->mempolicy)
2867 nid_alloc = slab_node(current->mempolicy);
2868 if (nid_alloc != nid_here)
2869 return __cache_alloc_node(cachep, flags, nid_alloc);
2870 return NULL;
2871}
2872
2873/*
2859 * A interface to enable slab creation on nodeid 2874 * A interface to enable slab creation on nodeid
2860 */ 2875 */
2861static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, 2876static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,