author	Christoph Lameter <clameter@sgi.com>	2007-10-16 04:25:33 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-16 12:42:58 -0400
commit	f64dc58c5412233d4d44b0275eaebdc11bde23b3 (patch)
tree	c35755b1a2a04eefd97d3561640992e5d817ccba
parent	04231b3002ac53f8a64a7bd142fde3fa4b6808c6 (diff)
Memoryless nodes: SLUB support
Simply switch all for_each_online_node() uses to for_each_node_state(node, N_NORMAL_MEMORY). That way SLUB only operates on nodes with regular memory. Any allocation attempt on a memoryless node, or on a node with just highmem, will fail, whereupon SLUB will fetch memory from a nearby node (depending on how memory policies and cpusets describe the fallback).

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Tested-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Acked-by: Bob Picco <bob.picco@hp.com>
Cc: Nishanth Aravamudan <nacc@us.ibm.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@skynet.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
 mm/slub.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)
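As a rough illustration of what the change below does (a userspace sketch, not kernel code: the node table and the two flags are hypothetical stand-ins for the kernel's node_online_map and node_states[N_NORMAL_MEMORY] masks), the old loop visits every online node, while the new one skips nodes that lack regular memory:

#include <stdbool.h>
#include <stdio.h>

#define MAX_NODES 4

/* Hypothetical per-node state, mirroring the idea of node state masks. */
struct node_info {
	bool online;         /* node is online (old loop's criterion) */
	bool normal_memory;  /* node has regular memory (new criterion) */
};

static struct node_info nodes[MAX_NODES] = {
	{ true, true  },  /* node 0: regular memory */
	{ true, false },  /* node 1: memoryless */
	{ true, false },  /* node 2: highmem only */
	{ true, true  },  /* node 3: regular memory */
};

int main(void)
{
	int node;

	/* Old behaviour: for_each_online_node(node) */
	for (node = 0; node < MAX_NODES; node++)
		if (nodes[node].online)
			printf("old loop visits node %d\n", node);

	/* New behaviour: for_each_node_state(node, N_NORMAL_MEMORY) */
	for (node = 0; node < MAX_NODES; node++)
		if (nodes[node].normal_memory)
			printf("new loop visits node %d\n", node);

	return 0;
}

With the table above, the old loop touches nodes 0-3 while the new loop touches only nodes 0 and 3, which is exactly why SLUB no longer needs per-node structures on nodes 1 and 2.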
diff --git a/mm/slub.c b/mm/slub.c
index d7c044dbd157..968ce3776e08 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1921,7 +1921,7 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
 {
 	int node;
 
-	for_each_online_node(node) {
+	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n = s->node[node];
 		if (n && n != &s->local_node)
 			kmem_cache_free(kmalloc_caches, n);
@@ -1939,7 +1939,7 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 	else
 		local_node = 0;
 
-	for_each_online_node(node) {
+	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n;
 
 		if (local_node == node)
@@ -2192,7 +2192,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
 	flush_all(s);
 
 	/* Attempt to free all objects */
-	for_each_online_node(node) {
+	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n = get_node(s, node);
 
 		n->nr_partial -= free_list(s, n, &n->partial);
@@ -2521,7 +2521,7 @@ int kmem_cache_shrink(struct kmem_cache *s)
 		return -ENOMEM;
 
 	flush_all(s);
-	for_each_online_node(node) {
+	for_each_node_state(node, N_NORMAL_MEMORY) {
 		n = get_node(s, node);
 
 		if (!n->nr_partial)
@@ -2916,7 +2916,7 @@ static long validate_slab_cache(struct kmem_cache *s)
 		return -ENOMEM;
 
 	flush_all(s);
-	for_each_online_node(node) {
+	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n = get_node(s, node);
 
 		count += validate_slab_node(s, n, map);
@@ -3136,7 +3136,7 @@ static int list_locations(struct kmem_cache *s, char *buf,
 	/* Push back cpu slabs */
 	flush_all(s);
 
-	for_each_online_node(node) {
+	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n = get_node(s, node);
 		unsigned long flags;
 		struct page *page;
@@ -3263,7 +3263,7 @@ static unsigned long slab_objects(struct kmem_cache *s,
 		}
 	}
 
-	for_each_online_node(node) {
+	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n = get_node(s, node);
 
 		if (flags & SO_PARTIAL) {
@@ -3291,7 +3291,7 @@ static unsigned long slab_objects(struct kmem_cache *s,
 
 	x = sprintf(buf, "%lu", total);
 #ifdef CONFIG_NUMA
-	for_each_online_node(node)
+	for_each_node_state(node, N_NORMAL_MEMORY)
 		if (nodes[node])
 			x += sprintf(buf + x, " N%d=%lu",
 				node, nodes[node]);
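For the fallback the commit message alludes to, here is a minimal sketch of "fetch memory from a nearby node": the distance table, the memory flags, and fallback_node() are all hypothetical; in the real kernel this is driven by zonelists, memory policies, and cpusets rather than a helper like this.

#include <stdbool.h>
#include <stdio.h>

#define MAX_NODES 4
#define NO_NODE   (-1)

/* Hypothetical inter-node distances, in the spirit of a SLIT table. */
static const int node_distance[MAX_NODES][MAX_NODES] = {
	{ 10, 20, 20, 30 },
	{ 20, 10, 30, 20 },
	{ 20, 30, 10, 20 },
	{ 30, 20, 20, 10 },
};

/* Which nodes carry regular memory in this example. */
static const bool has_normal_memory[MAX_NODES] = { true, false, false, true };

/* Return the closest node that has normal memory, preferring 'from'. */
static int fallback_node(int from)
{
	int best = NO_NODE, node;

	for (node = 0; node < MAX_NODES; node++) {
		if (!has_normal_memory[node])
			continue;
		if (best == NO_NODE ||
		    node_distance[from][node] < node_distance[from][best])
			best = node;
	}
	return best;
}

int main(void)
{
	int node;

	for (node = 0; node < MAX_NODES; node++)
		printf("allocation on node %d falls back to node %d\n",
		       node, fallback_node(node));
	return 0;
}

In this toy setup an allocation attempted on memoryless node 1 ends up on node 0, which is the behaviour the patch relies on once SLUB stops instantiating per-node structures on such nodes.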