Diffstat (limited to 'mm')
 mm/mempolicy.c | 30 ++++++++++++++++++++++++++++++
 mm/slab.c      | 12 ++++++++++++
 2 files changed, 42 insertions(+), 0 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index a683a66599b1..71430d440822 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -976,6 +976,36 @@ static unsigned interleave_nodes(struct mempolicy *policy)
 	return nid;
 }
 
+/*
+ * Depending on the memory policy provide a node from which to allocate the
+ * next slab entry.
+ */
+unsigned slab_node(struct mempolicy *policy)
+{
+	if (in_interrupt())
+		return numa_node_id();
+
+	switch (policy->policy) {
+	case MPOL_INTERLEAVE:
+		return interleave_nodes(policy);
+
+	case MPOL_BIND:
+		/*
+		 * Follow bind policy behavior and start allocation at the
+		 * first node.
+		 */
+		return policy->v.zonelist->zones[0]->zone_pgdat->node_id;
+
+	case MPOL_PREFERRED:
+		if (policy->v.preferred_node >= 0)
+			return policy->v.preferred_node;
+		/* Fall through */
+
+	default:
+		return numa_node_id();
+	}
+}
+
 /* Do static interleaving for a VMA with known offset. */
 static unsigned offset_il_node(struct mempolicy *pol,
 	struct vm_area_struct *vma, unsigned long off)
diff --git a/mm/slab.c b/mm/slab.c
index bd0317f1e06c..9025608696ec 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -103,6 +103,7 @@
 #include <linux/rcupdate.h>
 #include <linux/string.h>
 #include <linux/nodemask.h>
+#include <linux/mempolicy.h>
 #include <linux/mutex.h>
 
 #include <asm/uaccess.h>
@@ -773,6 +774,8 @@ static struct array_cache *alloc_arraycache(int node, int entries,
 }
 
 #ifdef CONFIG_NUMA
+static void *__cache_alloc_node(kmem_cache_t *, gfp_t, int);
+
 static inline struct array_cache **alloc_alien_cache(int node, int limit)
 {
 	struct array_cache **ac_ptr;
@@ -2570,6 +2573,15 @@ static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags)
 	void *objp;
 	struct array_cache *ac;
 
+#ifdef CONFIG_NUMA
+	if (current->mempolicy) {
+		int nid = slab_node(current->mempolicy);
+
+		if (nid != numa_node_id())
+			return __cache_alloc_node(cachep, flags, nid);
+	}
+#endif
+
 	check_irq_off();
 	ac = ac_data(cachep);
 	if (likely(ac->avail)) {
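
For illustration only: a minimal userspace sketch of the per-policy node
selection that slab_node() performs in the mempolicy.c hunk above. The type
mempolicy_sketch and the helpers local_node() and sketch_slab_node() are
simplified stand-ins invented here, not the kernel's definitions, and the
MPOL_INTERLEAVE case uses a plain round-robin cursor in place of
interleave_nodes().

/*
 * Standalone sketch of the slab_node() dispatch above. All types and
 * constants are simplified stand-ins chosen only to show how the policy
 * selects the node used for the next slab allocation.
 */
#include <stdio.h>

enum { MPOL_DEFAULT, MPOL_PREFERRED, MPOL_BIND, MPOL_INTERLEAVE };

struct mempolicy_sketch {
	int policy;
	int preferred_node;	/* MPOL_PREFERRED: -1 means "use local node" */
	int first_bound_node;	/* MPOL_BIND: first node of the bound zonelist */
	int interleave_next;	/* MPOL_INTERLEAVE: rotating cursor */
	int nr_nodes;		/* number of nodes to interleave across */
};

static int local_node(void)
{
	return 0;		/* stands in for numa_node_id() */
}

static int sketch_slab_node(struct mempolicy_sketch *pol)
{
	switch (pol->policy) {
	case MPOL_INTERLEAVE: {
		/* Rotate through the allowed nodes, like interleave_nodes(). */
		int nid = pol->interleave_next;
		pol->interleave_next = (nid + 1) % pol->nr_nodes;
		return nid;
	}
	case MPOL_BIND:
		/* Start allocation at the first bound node. */
		return pol->first_bound_node;
	case MPOL_PREFERRED:
		if (pol->preferred_node >= 0)
			return pol->preferred_node;
		/* Fall through to the local node. */
	default:
		return local_node();
	}
}

int main(void)
{
	struct mempolicy_sketch il   = { MPOL_INTERLEAVE, -1, 0, 0, 4 };
	struct mempolicy_sketch bind = { MPOL_BIND,       -1, 2, 0, 4 };

	for (int i = 0; i < 6; i++)
		printf("interleave -> node %d\n", sketch_slab_node(&il));
	printf("bind       -> node %d\n", sketch_slab_node(&bind));
	return 0;
}

This mirrors the shape of the slab.c hook: ____cache_alloc() only diverts to
__cache_alloc_node() when the policy names a node other than the local one,
so the common local-node fast path is left untouched.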