-rw-r--r--   mm/slab.c   10
-rw-r--r--   mm/slub.c   29
2 files changed, 25 insertions, 14 deletions
diff --git a/mm/slab.c b/mm/slab.c
index 19d92181ce24..5c846d25c17d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1621,10 +1621,16 @@ __initcall(cpucache_init);
 static noinline void
 slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
 {
+#if DEBUG
 	struct kmem_cache_node *n;
 	struct page *page;
 	unsigned long flags;
 	int node;
+	static DEFINE_RATELIMIT_STATE(slab_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
+				      DEFAULT_RATELIMIT_BURST);
+
+	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slab_oom_rs))
+		return;
 
 	printk(KERN_WARNING
 		"SLAB: Unable to allocate memory on node %d (gfp=0x%x)\n",
@@ -1662,6 +1668,7 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
 			node, active_slabs, num_slabs, active_objs, num_objs,
 			free_objects);
 	}
+#endif
 }
 
 /*
@@ -1683,8 +1690,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 
 	page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
 	if (!page) {
-		if (!(flags & __GFP_NOWARN) && printk_ratelimit())
-			slab_out_of_memory(cachep, flags, nodeid);
+		slab_out_of_memory(cachep, flags, nodeid);
 		return NULL;
 	}
 
diff --git a/mm/slub.c b/mm/slub.c
index de99d500af6c..65a0a5c57f31 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2119,11 +2119,19 @@ static inline int node_match(struct page *page, int node)
 	return 1;
 }
 
+#ifdef CONFIG_SLUB_DEBUG
 static int count_free(struct page *page)
 {
 	return page->objects - page->inuse;
 }
 
+static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
+{
+	return atomic_long_read(&n->total_objects);
+}
+#endif /* CONFIG_SLUB_DEBUG */
+
+#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS)
 static unsigned long count_partial(struct kmem_cache_node *n,
 					int (*get_count)(struct page *))
 {
@@ -2137,21 +2145,19 @@ static unsigned long count_partial(struct kmem_cache_node *n,
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	return x;
 }
-
-static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
-{
-#ifdef CONFIG_SLUB_DEBUG
-	return atomic_long_read(&n->total_objects);
-#else
-	return 0;
-#endif
-}
+#endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
 
 static noinline void
 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
 {
+#ifdef CONFIG_SLUB_DEBUG
+	static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
+				      DEFAULT_RATELIMIT_BURST);
 	int node;
 
+	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
+		return;
+
 	pr_warn("SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n",
 		nid, gfpflags);
 	pr_warn("  cache: %s, object size: %d, buffer size: %d, default order: %d, min order: %d\n",
@@ -2178,6 +2184,7 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
 		pr_warn("  node %d: slabs: %ld, objs: %ld, free: %ld\n",
 			node, nr_slabs, nr_objs, nr_free);
 	}
+#endif
 }
 
 static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
@@ -2356,9 +2363,7 @@ new_slab:
 	freelist = new_slab_objects(s, gfpflags, node, &c);
 
 	if (unlikely(!freelist)) {
-		if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
-			slab_out_of_memory(s, gfpflags, node);
-
+		slab_out_of_memory(s, gfpflags, node);
 		local_irq_restore(flags);
 		return NULL;
 	}
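
Note: both hunks replace the callers' open-coded __GFP_NOWARN/printk_ratelimit() checks with a per-callsite DEFINE_RATELIMIT_STATE()/__ratelimit() pair inside slab_out_of_memory() itself. The fragment below is not part of the patch; it is a minimal sketch of that pattern, with an illustrative helper name (my_warn_once_in_a_while) and state name (my_oom_rs) that do not exist in the kernel.

#include <linux/gfp.h>
#include <linux/printk.h>
#include <linux/ratelimit.h>

/* Hypothetical helper illustrating the per-callsite ratelimit pattern. */
static void my_warn_once_in_a_while(gfp_t gfpflags, int nid)
{
	/*
	 * One static state per warning site: at most DEFAULT_RATELIMIT_BURST
	 * messages are emitted every DEFAULT_RATELIMIT_INTERVAL jiffies.
	 */
	static DEFINE_RATELIMIT_STATE(my_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	/* Honor __GFP_NOWARN and drop messages beyond the allowed burst. */
	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&my_oom_rs))
		return;

	pr_warn("example: allocation failed on node %d (gfp=0x%x)\n",
		nid, gfpflags);
}

Unlike the global printk_ratelimit(), a dedicated ratelimit state means unrelated noisy printk users cannot suppress the slab OOM report, and the report cannot starve them in return.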