author    Rafael Aquini <aquini@redhat.com>      2012-03-09 15:27:27 -0500
committer Pekka Enberg <penberg@kernel.org>      2012-03-10 03:45:17 -0500
commit    8bdec192b40cf7f7eec170b317c76089eb5eeddb (patch)
tree      78bf7e353438b4ebe26b494e50d26111dc87d0d5 /mm
parent    a8203725dfded5c1f79dca3368a4a273e24b59bb (diff)
mm: SLAB Out-of-memory diagnostics
Following the example at mm/slub.c, add out-of-memory diagnostics to the
SLAB allocator to help debug certain OOM conditions.

An example print out looks like this:

  <snip page allocator out-of-memory message>
  SLAB: Unable to allocate memory on node 0 (gfp=0x11200)
    cache: bio-0, object size: 192, order: 0
    node 0: slabs: 3/3, objs: 60/60, free: 0

Signed-off-by: Rafael Aquini <aquini@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
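For context only, a minimal sketch of an out-of-tree test module (not part of this patch; the module name, cache name, and 192-byte object size are illustrative, the size borrowed from the bio-0 example above) that could exercise the new message path: a GFP_NOWAIT allocation that misses the per-CPU and per-node caches falls through to kmem_getpages(), whose failure now reports the diagnostic unless __GFP_NOWARN is set.

/* Illustrative sketch only: hypothetical module to exercise the new path. */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/gfp.h>

static struct kmem_cache *oom_test_cachep;

static int __init slab_oom_test_init(void)
{
	void *obj;

	/* 192-byte objects, mirroring the bio-0 example in the changelog. */
	oom_test_cachep = kmem_cache_create("slab_oom_test", 192, 0, 0, NULL);
	if (!oom_test_cachep)
		return -ENOMEM;

	/*
	 * GFP_NOWAIT cannot enter reclaim, so under memory pressure the
	 * page allocation in kmem_getpages() may fail, at which point the
	 * new slab_out_of_memory() diagnostic would be printed
	 * (rate-limited, and skipped for __GFP_NOWARN callers).
	 */
	obj = kmem_cache_alloc(oom_test_cachep, GFP_NOWAIT);
	if (obj)
		kmem_cache_free(oom_test_cachep, obj);

	return 0;
}

static void __exit slab_oom_test_exit(void)
{
	kmem_cache_destroy(oom_test_cachep);
}

module_init(slab_oom_test_init);
module_exit(slab_oom_test_exit);
MODULE_LICENSE("GPL");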
Diffstat (limited to 'mm')
 mm/slab.c | 51 ++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 50 insertions(+), 1 deletion(-)
diff --git a/mm/slab.c b/mm/slab.c
index 806a754fad8e..67e0e0589267 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1731,6 +1731,52 @@ static int __init cpucache_init(void)
 }
 __initcall(cpucache_init);
 
+static noinline void
+slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
+{
+	struct kmem_list3 *l3;
+	struct slab *slabp;
+	unsigned long flags;
+	int node;
+
+	printk(KERN_WARNING
+		"SLAB: Unable to allocate memory on node %d (gfp=0x%x)\n",
+		nodeid, gfpflags);
+	printk(KERN_WARNING "  cache: %s, object size: %d, order: %d\n",
+		cachep->name, cachep->buffer_size, cachep->gfporder);
+
+	for_each_online_node(node) {
+		unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
+		unsigned long active_slabs = 0, num_slabs = 0;
+
+		l3 = cachep->nodelists[node];
+		if (!l3)
+			continue;
+
+		spin_lock_irqsave(&l3->list_lock, flags);
+		list_for_each_entry(slabp, &l3->slabs_full, list) {
+			active_objs += cachep->num;
+			active_slabs++;
+		}
+		list_for_each_entry(slabp, &l3->slabs_partial, list) {
+			active_objs += slabp->inuse;
+			active_slabs++;
+		}
+		list_for_each_entry(slabp, &l3->slabs_free, list)
+			num_slabs++;
+
+		free_objects += l3->free_objects;
+		spin_unlock_irqrestore(&l3->list_lock, flags);
+
+		num_slabs += active_slabs;
+		num_objs = num_slabs * cachep->num;
+		printk(KERN_WARNING
+			"  node %d: slabs: %ld/%ld, objs: %ld/%ld, free: %ld\n",
+			node, active_slabs, num_slabs, active_objs, num_objs,
+			free_objects);
+	}
+}
+
 /*
  * Interface to system's page allocator. No need to hold the cache-lock.
  *
@@ -1757,8 +1803,11 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 		flags |= __GFP_RECLAIMABLE;
 
 	page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
-	if (!page)
+	if (!page) {
+		if (!(flags & __GFP_NOWARN) && printk_ratelimit())
+			slab_out_of_memory(cachep, flags, nodeid);
 		return NULL;
+	}
 
 	nr_pages = (1 << cachep->gfporder);
 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
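As a quick sanity check of the per-node accounting above, the following userspace sketch (illustrative only; the objects-per-slab value of 20 is an assumption inferred from the changelog example of 60 objects across 3 slabs) reproduces the arithmetic behind the "slabs: A/B, objs: C/D, free: E" line.

/* Userspace sketch (not kernel code): mirrors the per-node accounting. */
#include <stdio.h>

int main(void)
{
	unsigned long objs_per_slab = 20;    /* stand-in for cachep->num */
	unsigned long full = 3;              /* slabs on the full list */
	unsigned long partial = 0;           /* slabs on the partial list */
	unsigned long free_slabs = 0;        /* slabs on the free list */
	unsigned long partial_inuse = 0;     /* sum of slabp->inuse over partial slabs */
	unsigned long node_free_objects = 0; /* stand-in for l3->free_objects */

	unsigned long active_slabs = full + partial;
	unsigned long num_slabs = active_slabs + free_slabs;
	unsigned long active_objs = full * objs_per_slab + partial_inuse;
	unsigned long num_objs = num_slabs * objs_per_slab;

	/* Matches the changelog example: slabs: 3/3, objs: 60/60, free: 0 */
	printf("  node %d: slabs: %lu/%lu, objs: %lu/%lu, free: %lu\n",
	       0, active_slabs, num_slabs, active_objs, num_objs,
	       node_free_objects);
	return 0;
}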