Diffstat (limited to 'mm/slab.c')
 mm/slab.c | 41 ++++++++++++++++++++++++++++-------------
 1 file changed, 28 insertions(+), 13 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index de516658d3d8..f80b52388a12 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -899,6 +899,7 @@ static struct array_cache *alloc_arraycache(int node, int entries,
 
 #ifdef CONFIG_NUMA
 static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int);
+static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
 
 static struct array_cache **alloc_alien_cache(int node, int limit)
 {
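
The prototype is needed because ____cache_alloc(), patched in the next hunk, calls alternate_node_alloc() before its definition, which is added near __cache_alloc_node() later in the file. A minimal standalone sketch of the same pattern, with hypothetical caller/helper names invented for illustration:

#include <stdio.h>

static int helper(int x);          /* forward declaration, as in this hunk */

static int caller(int x)
{
	return helper(x) + 1;      /* called here, defined below */
}

static int helper(int x)
{
	return x * 2;
}

int main(void)
{
	printf("%d\n", caller(20)); /* prints 41 */
	return 0;
}
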
@@ -2808,19 +2809,11 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 	struct array_cache *ac;
 
 #ifdef CONFIG_NUMA
-	if (unlikely(current->mempolicy && !in_interrupt())) {
-		int nid = slab_node(current->mempolicy);
-
-		if (nid != numa_node_id())
-			return __cache_alloc_node(cachep, flags, nid);
-	}
-	if (unlikely(cpuset_do_slab_mem_spread() &&
-			(cachep->flags & SLAB_MEM_SPREAD) &&
-			!in_interrupt())) {
-		int nid = cpuset_mem_spread_node();
-
-		if (nid != numa_node_id())
-			return __cache_alloc_node(cachep, flags, nid);
+	if (unlikely(current->flags & (PF_SPREAD_PAGE | PF_SPREAD_SLAB |
+					PF_MEMPOLICY))) {
+		objp = alternate_node_alloc(cachep, flags);
+		if (objp != NULL)
+			return objp;
 	}
 #endif
 
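
The old fast path ran two separate conditionals, each re-testing in_interrupt() and dereferencing policy state; the replacement folds all three policies into a single unlikely() test of task flag bits, so tasks with no special placement policy pay one branch and the slower checks move into alternate_node_alloc(). A standalone sketch of that bitmask trick follows; the flag values and task struct are invented for illustration and do not match the kernel's:

#include <stdio.h>

#define PF_SPREAD_PAGE 0x01 /* hypothetical values; the kernel's differ */
#define PF_SPREAD_SLAB 0x02
#define PF_MEMPOLICY   0x04

struct task { unsigned long flags; };

static int needs_alternate_node(const struct task *t)
{
	/* One test covers all three policies; zero means fast path. */
	return (t->flags & (PF_SPREAD_PAGE | PF_SPREAD_SLAB | PF_MEMPOLICY)) != 0;
}

int main(void)
{
	struct task plain = { .flags = 0 };
	struct task spread = { .flags = PF_SPREAD_SLAB };

	printf("plain:  %d\n", needs_alternate_node(&plain));  /* 0 */
	printf("spread: %d\n", needs_alternate_node(&spread)); /* 1 */
	return 0;
}
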
@@ -2856,6 +2849,28 @@ static __always_inline void *__cache_alloc(struct kmem_cache *cachep,
 
 #ifdef CONFIG_NUMA
 /*
+ * Try allocating on another node if PF_SPREAD_PAGE|PF_SPREAD_SLAB|PF_MEMPOLICY.
+ *
+ * If we are in_interrupt, then process context, including cpusets and
+ * mempolicy, may not apply and should not be used for allocation policy.
+ */
+static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
+{
+	int nid_alloc, nid_here;
+
+	if (in_interrupt())
+		return NULL;
+	nid_alloc = nid_here = numa_node_id();
+	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
+		nid_alloc = cpuset_mem_spread_node();
+	else if (current->mempolicy)
+		nid_alloc = slab_node(current->mempolicy);
+	if (nid_alloc != nid_here)
+		return __cache_alloc_node(cachep, flags, nid_alloc);
+	return NULL;
+}
+
+/*
  * A interface to enable slab creation on nodeid
  */
 static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
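
Note the decision order: cpuset slab spreading takes priority over the task mempolicy, and the function returns NULL whenever the chosen node is the local one, letting the caller fall through to the existing per-CPU array_cache fast path rather than duplicating the local allocation here. A userspace sketch of that decision order, with every kernel helper stubbed out and all values invented for the sketch:

#include <stdio.h>

static int local_node = 0;

static int cpuset_spreading = 1; /* pretend cpuset slab spread is on */
static int have_mempolicy = 1;   /* pretend a mempolicy is installed */

static int cpuset_mem_spread_node(void) { return 2; } /* stub spread rotor */
static int mempolicy_node(void)         { return 1; } /* stub policy node */

/* Returns the node to allocate on, or -1 for "use the local fast path". */
static int pick_alternate_node(void)
{
	int nid_alloc = local_node, nid_here = local_node;

	if (cpuset_spreading)          /* cpuset spread has priority */
		nid_alloc = cpuset_mem_spread_node();
	else if (have_mempolicy)       /* otherwise honor the mempolicy */
		nid_alloc = mempolicy_node();
	return nid_alloc != nid_here ? nid_alloc : -1;
}

int main(void)
{
	printf("allocate on node %d\n", pick_alternate_node()); /* node 2 */
	return 0;
}
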