path: root/mm/slub.c
author    Paul Mackerras <paulus@samba.org>  2007-08-28 01:56:11 -0400
committer Paul Mackerras <paulus@samba.org>  2007-08-28 01:56:11 -0400
commit    35438c4327df18dbf5e7f597b69299119f4a14de (patch)
tree      a4589d731015db93f2eba8f84ffb1f48a8084020 /mm/slub.c
parent    2f6c9d961081dc7b109eb19166244bcb2a5dfc28 (diff)
parent    b07d68b5ca4d55a16fab223d63d5fb36f89ff42f (diff)
Merge branch 'linux-2.6' into for-2.6.24
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  15
1 file changed, 11 insertions(+), 4 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 69d02e3e439e..04151da399c6 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1877,9 +1877,16 @@ static struct kmem_cache_node * __init early_kmem_cache_node_alloc(gfp_t gfpflag
 
 	BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node));
 
-	page = new_slab(kmalloc_caches, gfpflags | GFP_THISNODE, node);
+	page = new_slab(kmalloc_caches, gfpflags, node);
 
 	BUG_ON(!page);
+	if (page_to_nid(page) != node) {
+		printk(KERN_ERR "SLUB: Unable to allocate memory from "
+				"node %d\n", node);
+		printk(KERN_ERR "SLUB: Allocating a useless per node structure "
+				"in order to be able to continue\n");
+	}
+
 	n = page->freelist;
 	BUG_ON(!n);
 	page->freelist = get_freepointer(kmalloc_caches, n);
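The hunk above loosens early_kmem_cache_node_alloc(): dropping GFP_THISNODE lets new_slab() fall back to another node instead of failing outright on a memoryless node, and the new printk pair only warns when the returned page turns out to be off-node. Below is a minimal userspace sketch of that prefer-local-then-fall-back pattern; alloc_on_node() and early_node_alloc() are hypothetical stand-ins, not the SLUB API.

	#include <stdio.h>
	#include <stdlib.h>

	/* Hypothetical stand-in for new_slab(): may satisfy the request
	 * from a different node than asked for (e.g. the node is
	 * memoryless). */
	static void *alloc_on_node(int want, int *got)
	{
		*got = want;	/* a real allocator might pick another node */
		return malloc(64);
	}

	/* Prefer node-local memory, but accept an off-node allocation and
	 * warn rather than fail hard, mirroring the hunk above. */
	static void *early_node_alloc(int node)
	{
		int got;
		void *p = alloc_on_node(node, &got);

		if (!p)
			abort();	/* the kernel keeps BUG_ON(!page) */
		if (got != node)
			fprintf(stderr,
				"SLUB: Unable to allocate memory from node %d\n",
				node);
		return p;	/* still usable even when off-node */
	}

	int main(void)
	{
		free(early_node_alloc(0));
		return 0;
	}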
@@ -3112,7 +3119,7 @@ static int list_locations(struct kmem_cache *s, char *buf,
 		unsigned long flags;
 		struct page *page;
 
-		if (!atomic_read(&n->nr_slabs))
+		if (!atomic_long_read(&n->nr_slabs))
 			continue;
 
 		spin_lock_irqsave(&n->list_lock, flags);
@@ -3247,7 +3254,7 @@ static unsigned long slab_objects(struct kmem_cache *s,
 		}
 
 		if (flags & SO_FULL) {
-			int full_slabs = atomic_read(&n->nr_slabs)
+			int full_slabs = atomic_long_read(&n->nr_slabs)
 					- per_cpu[node]
 					- n->nr_partial;
 
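In the SO_FULL branch above, SLUB never tracks full slabs directly: every slab on a node is exactly one of per-cpu (active), partial, or full, so the full count falls out by subtraction. A tiny worked example with made-up numbers:

	#include <stdio.h>

	int main(void)
	{
		/* Illustrative values only, not from a real system. */
		long nr_slabs = 120;	/* atomic_long_read(&n->nr_slabs) */
		long per_cpu = 4;	/* per_cpu[node]: slabs on cpu freelists */
		long partial = 16;	/* n->nr_partial: partially filled slabs */

		/* Same identity as the SO_FULL computation above. */
		printf("full slabs: %ld\n", nr_slabs - per_cpu - partial);
		return 0;	/* prints: full slabs: 100 */
	}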
@@ -3283,7 +3290,7 @@ static int any_slab_objects(struct kmem_cache *s)
 	for_each_node(node) {
 		struct kmem_cache_node *n = get_node(s, node);
 
-		if (n->nr_partial || atomic_read(&n->nr_slabs))
+		if (n->nr_partial || atomic_long_read(&n->nr_slabs))
 			return 1;
 	}
 	return 0;
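The three atomic_long_read() hunks fix readers of nr_slabs, which is declared atomic_long_t: reading it with the int-sized atomic_read() mismatches the type and can truncate the count, so each read switches to the matching long-sized accessor. A userspace sketch of the same idea, using C11 atomics as a stand-in for the kernel's atomic_long_t API:

	#include <stdatomic.h>
	#include <stdio.h>

	/* Long-sized counter: C11's atomic_long plays the role of the
	 * kernel's atomic_long_t, so large counts do not truncate. */
	static atomic_long nr_slabs;

	int main(void)
	{
		atomic_fetch_add(&nr_slabs, 1);	/* a slab was allocated */

		/* Readers use a matching long-sized load, like the switch
		 * from atomic_read() to atomic_long_read() above. */
		if (atomic_load(&nr_slabs))
			printf("node has %ld slabs\n", atomic_load(&nr_slabs));
		return 0;
	}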