 mm/mempolicy.c |   4 +-
 mm/slab.c      | 107 ++++++++++++-----
 2 files changed, 81 insertions(+), 30 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 38f89650bc84..cf18f0942553 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1136,7 +1136,9 @@ static unsigned interleave_nodes(struct mempolicy *policy)
  */
 unsigned slab_node(struct mempolicy *policy)
 {
-	switch (policy->policy) {
+	int pol = policy ? policy->policy : MPOL_DEFAULT;
+
+	switch (pol) {
 	case MPOL_INTERLEAVE:
 		return interleave_nodes(policy);
 
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -972,7 +972,39 @@ static int transfer_objects(struct array_cache *to,
 	return nr;
 }
 
-#ifdef CONFIG_NUMA
+#ifndef CONFIG_NUMA
+
+#define drain_alien_cache(cachep, alien) do { } while (0)
+#define reap_alien(cachep, l3) do { } while (0)
+
+static inline struct array_cache **alloc_alien_cache(int node, int limit)
+{
+	return (struct array_cache **)BAD_ALIEN_MAGIC;
+}
+
+static inline void free_alien_cache(struct array_cache **ac_ptr)
+{
+}
+
+static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
+{
+	return 0;
+}
+
+static inline void *alternate_node_alloc(struct kmem_cache *cachep,
+		gfp_t flags)
+{
+	return NULL;
+}
+
+static inline void *__cache_alloc_node(struct kmem_cache *cachep,
+		gfp_t flags, int nodeid)
+{
+	return NULL;
+}
+
+#else	/* CONFIG_NUMA */
+
 static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int);
 static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
 
@@ -1101,26 +1133,6 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 	}
 	return 1;
 }
-
-#else
-
-#define drain_alien_cache(cachep, alien) do { } while (0)
-#define reap_alien(cachep, l3) do { } while (0)
-
-static inline struct array_cache **alloc_alien_cache(int node, int limit)
-{
-	return (struct array_cache **)BAD_ALIEN_MAGIC;
-}
-
-static inline void free_alien_cache(struct array_cache **ac_ptr)
-{
-}
-
-static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
-{
-	return 0;
-}
-
 #endif
 
 static int __cpuinit cpuup_callback(struct notifier_block *nfb,
@@ -1564,7 +1576,13 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 	 */
 	flags |= __GFP_COMP;
 #endif
-	flags |= cachep->gfpflags;
+
+	/*
+	 * Under NUMA we want memory on the indicated node. We will handle
+	 * the needed fallback ourselves since we want to serve from our
+	 * per node object lists first for other nodes.
+	 */
+	flags |= cachep->gfpflags | GFP_THISNODE;
 
 	page = alloc_pages_node(nodeid, flags, cachep->gfporder);
 	if (!page)
@@ -3051,13 +3069,18 @@ static __always_inline void *__cache_alloc(struct kmem_cache *cachep,
 
 	local_irq_save(save_flags);
 
-#ifdef CONFIG_NUMA
-	if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY)))
+	if (unlikely(NUMA_BUILD &&
+			current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY)))
 		objp = alternate_node_alloc(cachep, flags);
-#endif
 
 	if (!objp)
 		objp = ____cache_alloc(cachep, flags);
+	/*
+	 * We may just have run out of memory on the local node.
+	 * __cache_alloc_node() knows how to locate memory on other nodes
+	 */
+	if (NUMA_BUILD && !objp)
+		objp = __cache_alloc_node(cachep, flags, numa_node_id());
 	local_irq_restore(save_flags);
 	objp = cache_alloc_debugcheck_after(cachep, flags, objp,
 					caller);
@@ -3076,7 +3099,7 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
 	int nid_alloc, nid_here;
 
-	if (in_interrupt())
+	if (in_interrupt() || (flags & __GFP_THISNODE))
 		return NULL;
 	nid_alloc = nid_here = numa_node_id();
 	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
@@ -3089,6 +3112,28 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
 }
 
 /*
+ * Fallback function if there was no memory available and no objects on a
+ * certain node and we are allowed to fall back. We mimick the behavior of
+ * the page allocator. We fall back according to a zonelist determined by
+ * the policy layer while obeying cpuset constraints.
+ */
+void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
+{
+	struct zonelist *zonelist = &NODE_DATA(slab_node(current->mempolicy))
+					->node_zonelists[gfp_zone(flags)];
+	struct zone **z;
+	void *obj = NULL;
+
+	for (z = zonelist->zones; *z && !obj; z++)
+		if (zone_idx(*z) <= ZONE_NORMAL &&
+				cpuset_zone_allowed(*z, flags))
+			obj = __cache_alloc_node(cache,
+					flags | __GFP_THISNODE,
+					zone_to_nid(*z));
+	return obj;
+}
+
+/*
  * A interface to enable slab creation on nodeid
  */
 static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
@@ -3141,11 +3186,15 @@ retry:
 must_grow:
 	spin_unlock(&l3->list_lock);
 	x = cache_grow(cachep, flags, nodeid);
+	if (x)
+		goto retry;
 
-	if (!x)
-		return NULL;
+	if (!(flags & __GFP_THISNODE))
+		/* Unable to grow the cache. Fall back to other nodes. */
+		return fallback_alloc(cachep, flags);
+
+	return NULL;
 
-	goto retry;
 done:
 	return obj;
 }
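
Taken together, the slab.c changes give a node-local allocation a fixed fallback order: try the requested node with __GFP_THISNODE set (so the page allocator does not wander off-node on its own), and only if that fails walk the policy zonelist, again pinning each attempt to its node. The snippet below is a small userspace sketch of that ordering only; every name in it is an illustrative stand-in, not the real kernel slab code.

/*
 * Simplified illustration of the fallback order introduced by this patch:
 *   1. allocate on the local node (stand-in for ____cache_alloc /
 *      __cache_alloc_node with __GFP_THISNODE)
 *   2. if that fails, walk the policy zonelist and try each node in turn,
 *      each attempt again restricted to its own node (fallback_alloc()).
 * All functions and data here are stand-ins, not slab internals.
 */
#include <stdio.h>
#include <stdbool.h>

#define MAX_NODES 4

/* Free objects available on each simulated node. */
static int free_objs[MAX_NODES] = { 0, 0, 3, 1 };

static int local_node;                         /* node 0 */
/* Node order as the policy layer might hand it out: local node first. */
static int zonelist[MAX_NODES] = { 0, 1, 2, 3 };

/* Try exactly one node; no further fallback (think __GFP_THISNODE). */
static bool alloc_on_node(int nid)
{
	if (free_objs[nid] > 0) {
		free_objs[nid]--;
		return true;
	}
	return false;
}

/* Walk the zonelist, stopping at the first node that can satisfy us. */
static bool fallback_alloc_sim(void)
{
	for (int i = 0; i < MAX_NODES; i++)
		if (alloc_on_node(zonelist[i]))
			return true;
	return false;
}

/* Local node first, then the zonelist fallback. */
static bool cache_alloc_sim(void)
{
	if (alloc_on_node(local_node))
		return true;
	return fallback_alloc_sim();
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		printf("alloc %d: %s\n", i, cache_alloc_sim() ? "ok" : "failed");
	return 0;
}

With node 0 empty, the first allocations are served from nodes 2 and 3 via the zonelist walk, and once every node is exhausted the allocation fails, mirroring the patch's "serve from per-node lists first, then fall back" behaviour.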