path: root/mm/slub.c
author     David Rientjes <rientjes@google.com>    2014-06-04 19:06:36 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2014-06-04 19:53:56 -0400
commit     9a02d699935c9acdfefe431bbc33771d1d87da7f (patch)
tree       907ec863b45a5ed64a46e671a1e4a91a9fece614 /mm/slub.c
parent     ecc42fbe952fa4aae88c2413e21912b1d665fb93 (diff)
mm, slab: suppress out of memory warning unless debug is enabled
When the slab or slub allocators cannot allocate additional slab pages, they emit diagnostic information to the kernel log such as current number of slabs, number of objects, active objects, etc.  This is always coupled with a page allocation failure warning since it is controlled by !__GFP_NOWARN.

Suppress this out of memory warning if the allocator is configured without debug support.  The page allocation failure warning will indicate it is a failed slab allocation, the order, and the gfp mask, so this is only useful to diagnose allocator issues.

Since CONFIG_SLUB_DEBUG is already enabled by default for the slub allocator, there is no functional change with this patch.  If debug is disabled, however, the warnings are now suppressed.

Signed-off-by: David Rientjes <rientjes@google.com>
Cc: Pekka Enberg <penberg@kernel.org>
Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
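For reference, the core of the change is the gating pattern sketched below: the per-node diagnostic dump is compiled out entirely when CONFIG_SLUB_DEBUG is disabled, and at runtime it is skipped for __GFP_NOWARN callers and ratelimited for everyone else. This is a minimal illustration, not the patch itself; the function name example_slab_oom_report and the message text are placeholders, while DEFINE_RATELIMIT_STATE(), __ratelimit() and pr_warn() are the standard kernel helpers the patch relies on.

#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/gfp.h>

static void example_slab_oom_report(gfp_t gfpflags, int nid)
{
#ifdef CONFIG_SLUB_DEBUG
        /* One ratelimit state shared by every invocation of this report. */
        static DEFINE_RATELIMIT_STATE(example_oom_rs,
                                      DEFAULT_RATELIMIT_INTERVAL,
                                      DEFAULT_RATELIMIT_BURST);

        /* Stay silent for __GFP_NOWARN callers and when ratelimited. */
        if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&example_oom_rs))
                return;

        pr_warn("example: unable to allocate memory on node %d (gfp=0x%x)\n",
                nid, gfpflags);
#endif /* CONFIG_SLUB_DEBUG */
}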
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c | 29
1 file changed, 17 insertions(+), 12 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index de99d500af6c..65a0a5c57f31 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2119,11 +2119,19 @@ static inline int node_match(struct page *page, int node)
 	return 1;
 }
 
+#ifdef CONFIG_SLUB_DEBUG
 static int count_free(struct page *page)
 {
 	return page->objects - page->inuse;
 }
 
+static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
+{
+	return atomic_long_read(&n->total_objects);
+}
+#endif /* CONFIG_SLUB_DEBUG */
+
+#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS)
 static unsigned long count_partial(struct kmem_cache_node *n,
 					int (*get_count)(struct page *))
 {
@@ -2137,21 +2145,19 @@ static unsigned long count_partial(struct kmem_cache_node *n,
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	return x;
 }
-
-static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
-{
-#ifdef CONFIG_SLUB_DEBUG
-	return atomic_long_read(&n->total_objects);
-#else
-	return 0;
-#endif
-}
+#endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
 
 static noinline void
 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
 {
+#ifdef CONFIG_SLUB_DEBUG
+	static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
+				      DEFAULT_RATELIMIT_BURST);
 	int node;
 
+	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
+		return;
+
 	pr_warn("SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n",
 		nid, gfpflags);
 	pr_warn("  cache: %s, object size: %d, buffer size: %d, default order: %d, min order: %d\n",
@@ -2178,6 +2184,7 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
 		pr_warn("  node %d: slabs: %ld, objs: %ld, free: %ld\n",
 			node, nr_slabs, nr_objs, nr_free);
 	}
+#endif
 }
 
 static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
@@ -2356,9 +2363,7 @@ new_slab:
 	freelist = new_slab_objects(s, gfpflags, node, &c);
 
 	if (unlikely(!freelist)) {
-		if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
-			slab_out_of_memory(s, gfpflags, node);
-
+		slab_out_of_memory(s, gfpflags, node);
 		local_irq_restore(flags);
 		return NULL;
 	}
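With the check folded into slab_out_of_memory() itself, the allocation path at the end of the diff no longer open-codes the __GFP_NOWARN and printk_ratelimit() tests, and callers see the same behaviour as before. A hypothetical caller, for illustration only:

#include <linux/slab.h>

static int example_alloc(void)
{
        /*
         * Hypothetical caller: with __GFP_NOWARN a failed allocation stays
         * silent; without the flag, the ratelimited SLUB report is emitted
         * when CONFIG_SLUB_DEBUG is enabled.
         */
        void *buf = kmalloc(4096, GFP_KERNEL | __GFP_NOWARN);

        if (!buf)
                return -ENOMEM;

        kfree(buf);
        return 0;
}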