Diffstat (limited to 'mm/slab.c')
 mm/slab.c | 51 ++++++++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 50 insertions(+), 1 deletion(-)
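This patch adds out-of-memory diagnostics to SLAB, apparently modelled on the similar report SLUB already emits: when kmem_getpages() fails to get pages from the page allocator, a new slab_out_of_memory() helper prints a ratelimited warning (suppressed by __GFP_NOWARN) showing the failing cache's name, object size, and page order, plus per-node slab and object usage gathered under each node's list_lock.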
diff --git a/mm/slab.c b/mm/slab.c
index 806a754fad8..67e0e058926 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1731,6 +1731,52 @@ static int __init cpucache_init(void)
 }
 __initcall(cpucache_init);
 
+static noinline void
+slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
+{
+	struct kmem_list3 *l3;
+	struct slab *slabp;
+	unsigned long flags;
+	int node;
+
+	printk(KERN_WARNING
+		"SLAB: Unable to allocate memory on node %d (gfp=0x%x)\n",
+		nodeid, gfpflags);
+	printk(KERN_WARNING "  cache: %s, object size: %d, order: %d\n",
+		cachep->name, cachep->buffer_size, cachep->gfporder);
+
+	for_each_online_node(node) {
+		unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
+		unsigned long active_slabs = 0, num_slabs = 0;
+
+		l3 = cachep->nodelists[node];
+		if (!l3)
+			continue;
+
+		spin_lock_irqsave(&l3->list_lock, flags);
+		list_for_each_entry(slabp, &l3->slabs_full, list) {
+			active_objs += cachep->num;
+			active_slabs++;
+		}
+		list_for_each_entry(slabp, &l3->slabs_partial, list) {
+			active_objs += slabp->inuse;
+			active_slabs++;
+		}
+		list_for_each_entry(slabp, &l3->slabs_free, list)
+			num_slabs++;
+
+		free_objects += l3->free_objects;
+		spin_unlock_irqrestore(&l3->list_lock, flags);
+
+		num_slabs += active_slabs;
+		num_objs = num_slabs * cachep->num;
+		printk(KERN_WARNING
+			"  node %d: slabs: %ld/%ld, objs: %ld/%ld, free: %ld\n",
+			node, active_slabs, num_slabs, active_objs, num_objs,
+			free_objects);
+	}
+}
+
 /*
  * Interface to system's page allocator. No need to hold the cache-lock.
  *
@@ -1757,8 +1803,11 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 	flags |= __GFP_RECLAIMABLE;
 
 	page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
-	if (!page)
+	if (!page) {
+		if (!(flags & __GFP_NOWARN) && printk_ratelimit())
+			slab_out_of_memory(cachep, flags, nodeid);
 		return NULL;
+	}
 
 	nr_pages = (1 << cachep->gfporder);
 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
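
For illustration only, the new warning might look roughly like this in the kernel log; the cache name, gfp mask, and counts below are hypothetical values substituted into the printk format strings above:

SLAB: Unable to allocate memory on node 0 (gfp=0xd0)
  cache: size-4096, object size: 4096, order: 1
  node 0: slabs: 52/52, objs: 104/104, free: 0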