author    Pekka Enberg <penberg@cs.helsinki.fi>  2009-06-10 11:50:32 -0400
committer Pekka Enberg <penberg@cs.helsinki.fi>  2009-06-11 11:14:18 -0400
commit    781b2ba6eb5f22440afac9c79a89ebd6e3674a60 (patch)
tree      89f3665c52e68cde9aab3eaf249d33e90db5fc20 /mm
parent    59a3759d0fe8d969888c741bb33f4946e4d3750d (diff)
SLUB: Out-of-memory diagnostics
As suggested by Mel Gorman, add out-of-memory diagnostics to the SLUB
allocator to make debugging OOM conditions easier. This patch helped hunt
down a nasty OOM issue that popped up every now and then and was caused by
SLUB debugging code which forced 4096-byte allocations to use order 1 pages
even in the fallback case.

An example print out looks like this:

  <snip page allocator out-of-memory message>
  SLUB: Unable to allocate memory on node -1 (gfp=20)
    cache: kmalloc-4096, object size: 4096, buffer size: 4168, default order: 3, min order: 1
    node 0: slabs: 95, objs: 665, free: 0

Acked-by: Christoph Lameter <cl@linux-foundation.org>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Tested-by: Larry Finger <Larry.Finger@lwfinger.net>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
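To make the numbers in the example print out concrete, here is a minimal
standalone sketch of the objects-per-slab arithmetic behind the diagnostic
(userspace C, illustrative only; the PAGE_SIZE define and the objs_per_slab()
helper are assumptions of this sketch, not kernel code). With debugging
enabled the 4096-byte object grows to a 4168-byte buffer, which no longer
fits in a single 4096-byte page, so even the fallback slab needs order 1,
i.e. two contiguous pages; at the default order 3, a 32768-byte slab holds
7 objects, which matches the reported 95 slabs and 665 objects.

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumption: x86-style 4 KiB pages */

/* Objects that fit in a slab of 2^order pages, as SLUB computes it. */
static unsigned long objs_per_slab(unsigned long buffer_size, int order)
{
	return (PAGE_SIZE << order) / buffer_size;
}

int main(void)
{
	unsigned long buffer_size = 4168;	/* 4096 + debug metadata */

	/* Debug padding pushes the buffer past one page, so the minimum
	 * ("fallback") slab is order 1 -- two contiguous pages, which is
	 * exactly what made this OOM sporadic and hard to diagnose. */
	printf("objs/slab at min order 1:     %lu\n",
	       objs_per_slab(buffer_size, 1));	/* 8192 / 4168  -> 1 */
	printf("objs/slab at default order 3: %lu\n",
	       objs_per_slab(buffer_size, 3));	/* 32768 / 4168 -> 7 */

	/* Cross-check against the sample print out: 95 slabs, 665 objs. */
	printf("95 slabs * 7 objs = %d\n", 95 * 7);	/* = 665 */
	return 0;
}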
Diffstat (limited to 'mm')
-rw-r--r--	mm/slub.c	70
1 file changed, 51 insertions(+), 19 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 65ffda5934b..a5a4ecf7e39 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1484,6 +1484,56 @@ static inline int node_match(struct kmem_cache_cpu *c, int node)
 	return 1;
 }
 
+static int count_free(struct page *page)
+{
+	return page->objects - page->inuse;
+}
+
+static unsigned long count_partial(struct kmem_cache_node *n,
+					int (*get_count)(struct page *))
+{
+	unsigned long flags;
+	unsigned long x = 0;
+	struct page *page;
+
+	spin_lock_irqsave(&n->list_lock, flags);
+	list_for_each_entry(page, &n->partial, lru)
+		x += get_count(page);
+	spin_unlock_irqrestore(&n->list_lock, flags);
+	return x;
+}
+
+static noinline void
+slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
+{
+	int node;
+
+	printk(KERN_WARNING
+		"SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n",
+		nid, gfpflags);
+	printk(KERN_WARNING "  cache: %s, object size: %d, buffer size: %d, "
+		"default order: %d, min order: %d\n", s->name, s->objsize,
+		s->size, oo_order(s->oo), oo_order(s->min));
+
+	for_each_online_node(node) {
+		struct kmem_cache_node *n = get_node(s, node);
+		unsigned long nr_slabs;
+		unsigned long nr_objs;
+		unsigned long nr_free;
+
+		if (!n)
+			continue;
+
+		nr_slabs = atomic_long_read(&n->nr_slabs);
+		nr_objs = atomic_long_read(&n->total_objects);
+		nr_free = count_partial(n, count_free);
+
+		printk(KERN_WARNING
+			"  node %d: slabs: %ld, objs: %ld, free: %ld\n",
+			node, nr_slabs, nr_objs, nr_free);
+	}
+}
+
 /*
  * Slow path. The lockless freelist is empty or we need to perform
  * debugging duties.
@@ -1565,6 +1615,7 @@ new_slab:
 		c->page = new;
 		goto load_freelist;
 	}
+	slab_out_of_memory(s, gfpflags, node);
 	return NULL;
 debug:
 	if (!alloc_debug_processing(s, c->page, object, addr))
@@ -3318,20 +3369,6 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 }
 
 #ifdef CONFIG_SLUB_DEBUG
-static unsigned long count_partial(struct kmem_cache_node *n,
-					int (*get_count)(struct page *))
-{
-	unsigned long flags;
-	unsigned long x = 0;
-	struct page *page;
-
-	spin_lock_irqsave(&n->list_lock, flags);
-	list_for_each_entry(page, &n->partial, lru)
-		x += get_count(page);
-	spin_unlock_irqrestore(&n->list_lock, flags);
-	return x;
-}
-
 static int count_inuse(struct page *page)
 {
 	return page->inuse;
@@ -3342,11 +3379,6 @@ static int count_total(struct page *page)
 	return page->objects;
 }
 
-static int count_free(struct page *page)
-{
-	return page->objects - page->inuse;
-}
-
 static int validate_slab(struct kmem_cache *s, struct page *page,
 			 unsigned long *map)
 {
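As the last two hunks show, count_free() and count_partial() move out of the
CONFIG_SLUB_DEBUG-only section so that slab_out_of_memory() can use them
unconditionally. The callback-based counting idiom they share is sketched
below in standalone userspace C (struct page_stub and the unlocked singly
linked walk are simplifications of this sketch, not the kernel's struct page
and spinlock-protected list_for_each_entry()).

#include <stdio.h>

struct page_stub {			/* stand-in for the struct page fields used */
	int objects;
	int inuse;
	struct page_stub *next;
};

static int count_free(struct page_stub *page)
{
	return page->objects - page->inuse;
}

/* Walks the partial list, summing whatever per-page metric the caller
 * supplies; the kernel version does this under n->list_lock. */
static unsigned long count_partial(struct page_stub *partial,
				   int (*get_count)(struct page_stub *))
{
	unsigned long x = 0;

	for (; partial; partial = partial->next)
		x += get_count(partial);
	return x;
}

int main(void)
{
	struct page_stub b = { .objects = 7, .inuse = 7, .next = NULL };
	struct page_stub a = { .objects = 7, .inuse = 5, .next = &b };

	/* Two partial slabs: 2 free objects in the first, 0 in the second. */
	printf("free objects: %lu\n", count_partial(&a, count_free));
	return 0;
}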