author     Linus Torvalds <torvalds@linux-foundation.org>   2013-09-15 07:15:06 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2013-09-15 07:15:06 -0400
commit     bff157b3ad4b9f6be0af6987fcd62deaf0f2b799 (patch)
tree       02ae68620a40fefd9ffc2de739a8bb362baa3f08
parent     8bf5e36d0429e9b8fc2c84966577f10386bd7195 (diff)
parent     23774a2f6fee0848503bfb8004eeeb5adef94f5c (diff)
Merge branch 'slab/next' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux
Pull SLAB update from Pekka Enberg:
 "Nothing terribly exciting here apart from Christoph's kmalloc
  unification patches that brings sl[aou]b implementations closer to
  each other"

* 'slab/next' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux:
  slab: Use correct GFP_DMA constant
  slub: remove verify_mem_not_deleted()
  mm/sl[aou]b: Move kmallocXXX functions to common code
  mm, slab_common: add 'unlikely' to size check of kmalloc_slab()
  mm/slub.c: beautify code for removing redundancy 'break' statement.
  slub: Remove unnecessary page NULL check
  slub: don't use cpu partial pages on UP
  mm/slub: beautify code for 80 column limitation and tab alignment
  mm/slub: remove 'per_cpu' which is useless variable
-rw-r--r--  include/linux/slab.h      | 156
-rw-r--r--  include/linux/slab_def.h  | 106
-rw-r--r--  include/linux/slob_def.h  |  31
-rw-r--r--  include/linux/slub_def.h  | 110
-rw-r--r--  init/Kconfig              |   2
-rw-r--r--  mm/slab_common.c          |  12
-rw-r--r--  mm/slob.c                 |  28
-rw-r--r--  mm/slub.c                 | 142
8 files changed, 216 insertions(+), 371 deletions(-)
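
For orientation before the per-file diffs: the main functional change in this pull is Christoph's kmalloc unification, which moves the compile-time kmalloc() fast path out of the per-allocator slab_def.h/slub_def.h/slob_def.h headers into the common include/linux/slab.h (first hunks below). A rough caller-side sketch of what the unified inline means; the function example() and struct foo here are made up for illustration, only kmalloc()/kfree() and the constant-size dispatch come from the patch:

        #include <linux/slab.h>

        struct foo { int a, b; };

        void *example(size_t runtime_len, gfp_t gfp)
        {
                /* Constant size: with SLAB/SLUB the common inline resolves this
                 * at compile time to kmem_cache_alloc_trace() on the matching
                 * kmalloc_caches[] entry (or to kmalloc_large() above
                 * KMALLOC_MAX_CACHE_SIZE).
                 */
                struct foo *f = kmalloc(sizeof(*f), gfp);

                /* Runtime size: always falls through to the allocator's
                 * out-of-line __kmalloc().
                 */
                void *buf = kmalloc(runtime_len, gfp);

                kfree(buf);
                return f;
        }
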
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 6c5cc0ea8713..74f105847d13 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -4,6 +4,8 @@
  * (C) SGI 2006, Christoph Lameter
  * Cleaned up and restructured to ease the addition of alternative
  * implementations of SLAB allocators.
+ * (C) Linux Foundation 2008-2013
+ *      Unified interface for all slab allocators
  */
 
 #ifndef _LINUX_SLAB_H
@@ -94,6 +96,7 @@
 #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
                                 (unsigned long)ZERO_SIZE_PTR)
 
+#include <linux/kmemleak.h>
 
 struct mem_cgroup;
 /*
@@ -289,6 +292,57 @@ static __always_inline int kmalloc_index(size_t size)
 }
 #endif /* !CONFIG_SLOB */
 
+void *__kmalloc(size_t size, gfp_t flags);
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
+
+#ifdef CONFIG_NUMA
+void *__kmalloc_node(size_t size, gfp_t flags, int node);
+void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
+#else
+static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
+{
+        return __kmalloc(size, flags);
+}
+
+static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
+{
+        return kmem_cache_alloc(s, flags);
+}
+#endif
+
+#ifdef CONFIG_TRACING
+extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
+
+#ifdef CONFIG_NUMA
+extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
+                                         gfp_t gfpflags,
+                                         int node, size_t size);
+#else
+static __always_inline void *
+kmem_cache_alloc_node_trace(struct kmem_cache *s,
+                            gfp_t gfpflags,
+                            int node, size_t size)
+{
+        return kmem_cache_alloc_trace(s, gfpflags, size);
+}
+#endif /* CONFIG_NUMA */
+
+#else /* CONFIG_TRACING */
+static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
+                gfp_t flags, size_t size)
+{
+        return kmem_cache_alloc(s, flags);
+}
+
+static __always_inline void *
+kmem_cache_alloc_node_trace(struct kmem_cache *s,
+                            gfp_t gfpflags,
+                            int node, size_t size)
+{
+        return kmem_cache_alloc_node(s, gfpflags, node);
+}
+#endif /* CONFIG_TRACING */
+
 #ifdef CONFIG_SLAB
 #include <linux/slab_def.h>
 #endif
@@ -297,9 +351,60 @@ static __always_inline int kmalloc_index(size_t size)
 #include <linux/slub_def.h>
 #endif
 
-#ifdef CONFIG_SLOB
-#include <linux/slob_def.h>
+static __always_inline void *
+kmalloc_order(size_t size, gfp_t flags, unsigned int order)
+{
+        void *ret;
+
+        flags |= (__GFP_COMP | __GFP_KMEMCG);
+        ret = (void *) __get_free_pages(flags, order);
+        kmemleak_alloc(ret, size, 1, flags);
+        return ret;
+}
+
+#ifdef CONFIG_TRACING
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
+#else
+static __always_inline void *
+kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+{
+        return kmalloc_order(size, flags, order);
+}
+#endif
+
+static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
+{
+        unsigned int order = get_order(size);
+        return kmalloc_order_trace(size, flags, order);
+}
+
+/**
+ * kmalloc - allocate memory
+ * @size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate (see kcalloc).
+ *
+ * kmalloc is the normal method of allocating memory
+ * for objects smaller than page size in the kernel.
+ */
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
+{
+        if (__builtin_constant_p(size)) {
+                if (size > KMALLOC_MAX_CACHE_SIZE)
+                        return kmalloc_large(size, flags);
+#ifndef CONFIG_SLOB
+                if (!(flags & GFP_DMA)) {
+                        int index = kmalloc_index(size);
+
+                        if (!index)
+                                return ZERO_SIZE_PTR;
+
+                        return kmem_cache_alloc_trace(kmalloc_caches[index],
+                                        flags, size);
+                }
 #endif
+        }
+        return __kmalloc(size, flags);
+}
 
 /*
  * Determine size used for the nth kmalloc cache.
@@ -321,6 +426,23 @@ static __always_inline int kmalloc_size(int n)
         return 0;
 }
 
+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+{
+#ifndef CONFIG_SLOB
+        if (__builtin_constant_p(size) &&
+                size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
+                int i = kmalloc_index(size);
+
+                if (!i)
+                        return ZERO_SIZE_PTR;
+
+                return kmem_cache_alloc_node_trace(kmalloc_caches[i],
+                                                flags, node, size);
+        }
+#endif
+        return __kmalloc_node(size, flags, node);
+}
+
 /*
  * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
  * Intended for arches that get misalignment faults even for 64 bit integer
@@ -451,36 +573,6 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
         return kmalloc_array(n, size, flags | __GFP_ZERO);
 }
 
-#if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB)
-/**
- * kmalloc_node - allocate memory from a specific node
- * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate (see kmalloc).
- * @node: node to allocate from.
- *
- * kmalloc() for non-local nodes, used to allocate from a specific node
- * if available. Equivalent to kmalloc() in the non-NUMA single-node
- * case.
- */
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
-        return kmalloc(size, flags);
-}
-
-static inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
-{
-        return __kmalloc(size, flags);
-}
-
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
-
-static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
-                                        gfp_t flags, int node)
-{
-        return kmem_cache_alloc(cachep, flags);
-}
-#endif /* !CONFIG_NUMA && !CONFIG_SLOB */
-
 /*
  * kmalloc_track_caller is a special version of kmalloc that records the
  * calling function of the routine calling it for slab leak tracking instead
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index cd401580bdd3..e9346b4f1ef4 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -3,20 +3,6 @@
 
 /*
  * Definitions unique to the original Linux SLAB allocator.
- *
- * What we provide here is a way to optimize the frequent kmalloc
- * calls in the kernel by selecting the appropriate general cache
- * if kmalloc was called with a size that can be established at
- * compile time.
- */
-
-#include <linux/init.h>
-#include <linux/compiler.h>
-
-/*
- * struct kmem_cache
- *
- * manages a cache.
  */
 
 struct kmem_cache {
@@ -102,96 +88,4 @@ struct kmem_cache {
  */
 };
 
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
-void *__kmalloc(size_t size, gfp_t flags);
-
-#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
-#else
-static __always_inline void *
-kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
-{
-        return kmem_cache_alloc(cachep, flags);
-}
-#endif
-
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
-{
-        struct kmem_cache *cachep;
-        void *ret;
-
-        if (__builtin_constant_p(size)) {
-                int i;
-
-                if (!size)
-                        return ZERO_SIZE_PTR;
-
-                if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE))
-                        return NULL;
-
-                i = kmalloc_index(size);
-
-#ifdef CONFIG_ZONE_DMA
-                if (flags & GFP_DMA)
-                        cachep = kmalloc_dma_caches[i];
-                else
-#endif
-                        cachep = kmalloc_caches[i];
-
-                ret = kmem_cache_alloc_trace(cachep, flags, size);
-
-                return ret;
-        }
-        return __kmalloc(size, flags);
-}
-
-#ifdef CONFIG_NUMA
-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
-extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-
-#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
-                                         gfp_t flags,
-                                         int nodeid,
-                                         size_t size);
-#else
-static __always_inline void *
-kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
-                            gfp_t flags,
-                            int nodeid,
-                            size_t size)
-{
-        return kmem_cache_alloc_node(cachep, flags, nodeid);
-}
-#endif
-
-static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
-        struct kmem_cache *cachep;
-
-        if (__builtin_constant_p(size)) {
-                int i;
-
-                if (!size)
-                        return ZERO_SIZE_PTR;
-
-                if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE))
-                        return NULL;
-
-                i = kmalloc_index(size);
-
-#ifdef CONFIG_ZONE_DMA
-                if (flags & GFP_DMA)
-                        cachep = kmalloc_dma_caches[i];
-                else
-#endif
-                        cachep = kmalloc_caches[i];
-
-                return kmem_cache_alloc_node_trace(cachep, flags, node, size);
-        }
-        return __kmalloc_node(size, flags, node);
-}
-
-#endif /* CONFIG_NUMA */
-
 #endif /* _LINUX_SLAB_DEF_H */
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
deleted file mode 100644
index 095a5a4a8516..000000000000
--- a/include/linux/slob_def.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef __LINUX_SLOB_DEF_H
-#define __LINUX_SLOB_DEF_H
-
-#include <linux/numa.h>
-
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-
-static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
-                                              gfp_t flags)
-{
-        return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE);
-}
-
-void *__kmalloc_node(size_t size, gfp_t flags, int node);
-
-static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
-        return __kmalloc_node(size, flags, node);
-}
-
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
-{
-        return __kmalloc_node(size, flags, NUMA_NO_NODE);
-}
-
-static __always_inline void *__kmalloc(size_t size, gfp_t flags)
-{
-        return kmalloc(size, flags);
-}
-
-#endif /* __LINUX_SLOB_DEF_H */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 027276fa8713..cc0b67eada42 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -6,14 +6,8 @@
  *
  * (C) 2007 SGI, Christoph Lameter
  */
-#include <linux/types.h>
-#include <linux/gfp.h>
-#include <linux/bug.h>
-#include <linux/workqueue.h>
 #include <linux/kobject.h>
 
-#include <linux/kmemleak.h>
-
 enum stat_item {
         ALLOC_FASTPATH,         /* Allocation from cpu slab */
         ALLOC_SLOWPATH,         /* Allocation by getting a new cpu slab */
@@ -104,108 +98,4 @@ struct kmem_cache {
         struct kmem_cache_node *node[MAX_NUMNODES];
 };
 
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
-void *__kmalloc(size_t size, gfp_t flags);
-
-static __always_inline void *
-kmalloc_order(size_t size, gfp_t flags, unsigned int order)
-{
-        void *ret;
-
-        flags |= (__GFP_COMP | __GFP_KMEMCG);
-        ret = (void *) __get_free_pages(flags, order);
-        kmemleak_alloc(ret, size, 1, flags);
-        return ret;
-}
-
-/**
- * Calling this on allocated memory will check that the memory
- * is expected to be in use, and print warnings if not.
- */
-#ifdef CONFIG_SLUB_DEBUG
-extern bool verify_mem_not_deleted(const void *x);
-#else
-static inline bool verify_mem_not_deleted(const void *x)
-{
-        return true;
-}
-#endif
-
-#ifdef CONFIG_TRACING
-extern void *
-kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size);
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
-#else
-static __always_inline void *
-kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
-{
-        return kmem_cache_alloc(s, gfpflags);
-}
-
-static __always_inline void *
-kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
-{
-        return kmalloc_order(size, flags, order);
-}
-#endif
-
-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
-{
-        unsigned int order = get_order(size);
-        return kmalloc_order_trace(size, flags, order);
-}
-
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
-{
-        if (__builtin_constant_p(size)) {
-                if (size > KMALLOC_MAX_CACHE_SIZE)
-                        return kmalloc_large(size, flags);
-
-                if (!(flags & GFP_DMA)) {
-                        int index = kmalloc_index(size);
-
-                        if (!index)
-                                return ZERO_SIZE_PTR;
-
-                        return kmem_cache_alloc_trace(kmalloc_caches[index],
-                                        flags, size);
-                }
-        }
-        return __kmalloc(size, flags);
-}
-
-#ifdef CONFIG_NUMA
-void *__kmalloc_node(size_t size, gfp_t flags, int node);
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-
-#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
-                                        gfp_t gfpflags,
-                                        int node, size_t size);
-#else
-static __always_inline void *
-kmem_cache_alloc_node_trace(struct kmem_cache *s,
-                        gfp_t gfpflags,
-                        int node, size_t size)
-{
-        return kmem_cache_alloc_node(s, gfpflags, node);
-}
-#endif
-
-static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
-        if (__builtin_constant_p(size) &&
-                size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
-                int index = kmalloc_index(size);
-
-                if (!index)
-                        return ZERO_SIZE_PTR;
-
-                return kmem_cache_alloc_node_trace(kmalloc_caches[index],
-                                        flags, node, size);
-        }
-        return __kmalloc_node(size, flags, node);
-}
-#endif
-
 #endif /* _LINUX_SLUB_DEF_H */
diff --git a/init/Kconfig b/init/Kconfig
index 18bd9e3d3274..3ecd8a1178f1 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1602,7 +1602,7 @@ endchoice
 
 config SLUB_CPU_PARTIAL
         default y
-        depends on SLUB
+        depends on SLUB && SMP
         bool "SLUB per cpu partial cache"
         help
           Per cpu partial caches accellerate objects allocation and freeing
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 538bade6df7d..a3443278ce3a 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -19,6 +19,7 @@
 #include <asm/tlbflush.h>
 #include <asm/page.h>
 #include <linux/memcontrol.h>
+#include <trace/events/kmem.h>
 
 #include "slab.h"
 
@@ -373,7 +374,7 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
 {
         int index;
 
-        if (size > KMALLOC_MAX_SIZE) {
+        if (unlikely(size > KMALLOC_MAX_SIZE)) {
                 WARN_ON_ONCE(!(flags & __GFP_NOWARN));
                 return NULL;
         }
@@ -495,6 +496,15 @@ void __init create_kmalloc_caches(unsigned long flags)
 }
 #endif /* !CONFIG_SLOB */
 
+#ifdef CONFIG_TRACING
+void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+{
+        void *ret = kmalloc_order(size, flags, order);
+        trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
+        return ret;
+}
+EXPORT_SYMBOL(kmalloc_order_trace);
+#endif
 
 #ifdef CONFIG_SLABINFO
 
diff --git a/mm/slob.c b/mm/slob.c
index 91bd3f2dd2f0..4bf8809dfcce 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -462,11 +462,11 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
         return ret;
 }
 
-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+void *__kmalloc(size_t size, gfp_t gfp)
 {
-        return __do_kmalloc_node(size, gfp, node, _RET_IP_);
+        return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
 }
-EXPORT_SYMBOL(__kmalloc_node);
+EXPORT_SYMBOL(__kmalloc);
 
 #ifdef CONFIG_TRACING
 void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
@@ -534,7 +534,7 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
         return 0;
 }
 
-void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
+void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
         void *b;
 
@@ -560,7 +560,27 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
         kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
         return b;
 }
+EXPORT_SYMBOL(slob_alloc_node);
+
+void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
+{
+        return slob_alloc_node(cachep, flags, NUMA_NO_NODE);
+}
+EXPORT_SYMBOL(kmem_cache_alloc);
+
+#ifdef CONFIG_NUMA
+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+{
+        return __do_kmalloc_node(size, gfp, node, _RET_IP_);
+}
+EXPORT_SYMBOL(__kmalloc_node);
+
+void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
+{
+        return slob_alloc_node(cachep, gfp, node);
+}
 EXPORT_SYMBOL(kmem_cache_alloc_node);
+#endif
 
 static void __kmem_cache_free(void *b, int size)
 {
diff --git a/mm/slub.c b/mm/slub.c
index 51df8272cfaf..c3eb3d3ca835 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -373,7 +373,8 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page
 #endif
         {
                 slab_lock(page);
-                if (page->freelist == freelist_old && page->counters == counters_old) {
+                if (page->freelist == freelist_old &&
+                                        page->counters == counters_old) {
                         page->freelist = freelist_new;
                         page->counters = counters_new;
                         slab_unlock(page);
@@ -411,7 +412,8 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 
                 local_irq_save(flags);
                 slab_lock(page);
-                if (page->freelist == freelist_old && page->counters == counters_old) {
+                if (page->freelist == freelist_old &&
+                                        page->counters == counters_old) {
                         page->freelist = freelist_new;
                         page->counters = counters_new;
                         slab_unlock(page);
@@ -553,8 +555,9 @@ static void print_tracking(struct kmem_cache *s, void *object)
 
 static void print_page_info(struct page *page)
 {
-        printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
-                page, page->objects, page->inuse, page->freelist, page->flags);
+        printk(KERN_ERR
+               "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
+               page, page->objects, page->inuse, page->freelist, page->flags);
 
 }
 
@@ -629,7 +632,8 @@ static void object_err(struct kmem_cache *s, struct page *page,
         print_trailer(s, page, object);
 }
 
-static void slab_err(struct kmem_cache *s, struct page *page, const char *fmt, ...)
+static void slab_err(struct kmem_cache *s, struct page *page,
+                        const char *fmt, ...)
 {
         va_list args;
         char buf[100];
@@ -788,7 +792,8 @@ static int check_object(struct kmem_cache *s, struct page *page,
         } else {
                 if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
                         check_bytes_and_report(s, page, p, "Alignment padding",
-                                endobject, POISON_INUSE, s->inuse - s->object_size);
+                                endobject, POISON_INUSE,
+                                s->inuse - s->object_size);
                 }
         }
 
@@ -873,7 +878,6 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
                                 object_err(s, page, object,
                                         "Freechain corrupt");
                                 set_freepointer(s, object, NULL);
-                                break;
                         } else {
                                 slab_err(s, page, "Freepointer corrupt");
                                 page->freelist = NULL;
@@ -918,7 +922,8 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
                         page->freelist);
 
                 if (!alloc)
-                        print_section("Object ", (void *)object, s->object_size);
+                        print_section("Object ", (void *)object,
+                                        s->object_size);
 
                 dump_stack();
         }
@@ -937,7 +942,8 @@ static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
         return should_failslab(s->object_size, flags, s->flags);
 }
 
-static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
+static inline void slab_post_alloc_hook(struct kmem_cache *s,
+                                        gfp_t flags, void *object)
 {
         flags &= gfp_allowed_mask;
         kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
@@ -1039,7 +1045,8 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
         init_tracking(s, object);
 }
 
-static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *page,
+static noinline int alloc_debug_processing(struct kmem_cache *s,
+                                        struct page *page,
                                         void *object, unsigned long addr)
 {
         if (!check_slab(s, page))
@@ -1743,7 +1750,8 @@ static void init_kmem_cache_cpus(struct kmem_cache *s)
 /*
  * Remove the cpu slab
  */
-static void deactivate_slab(struct kmem_cache *s, struct page *page, void *freelist)
+static void deactivate_slab(struct kmem_cache *s, struct page *page,
+                                void *freelist)
 {
         enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
         struct kmem_cache_node *n = get_node(s, page_to_nid(page));
@@ -1999,7 +2007,8 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
                 page->pobjects = pobjects;
                 page->next = oldpage;
 
-        } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
+        } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
+                                                                != oldpage);
 #endif
 }
 
@@ -2169,8 +2178,8 @@ static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
 }
 
 /*
- * Check the page->freelist of a page and either transfer the freelist to the per cpu freelist
- * or deactivate the page.
+ * Check the page->freelist of a page and either transfer the freelist to the
+ * per cpu freelist or deactivate the page.
  *
  * The page is still frozen if the return value is not NULL.
  *
@@ -2314,7 +2323,8 @@ new_slab:
                 goto load_freelist;
 
         /* Only entered in the debug case */
-        if (kmem_cache_debug(s) && !alloc_debug_processing(s, page, freelist, addr))
+        if (kmem_cache_debug(s) &&
+                        !alloc_debug_processing(s, page, freelist, addr))
                 goto new_slab; /* Slab failed checks. Next slab needed */
 
         deactivate_slab(s, page, get_freepointer(s, freelist));
@@ -2372,7 +2382,7 @@ redo:
 
         object = c->freelist;
         page = c->page;
-        if (unlikely(!object || !page || !node_match(page, node)))
+        if (unlikely(!object || !node_match(page, node)))
                 object = __slab_alloc(s, gfpflags, node, addr, c);
 
         else {
@@ -2382,13 +2392,15 @@ redo:
                  * The cmpxchg will only match if there was no additional
                  * operation and if we are on the right processor.
                  *
-                 * The cmpxchg does the following atomically (without lock semantics!)
+                 * The cmpxchg does the following atomically (without lock
+                 * semantics!)
                  * 1. Relocate first pointer to the current per cpu area.
                  * 2. Verify that tid and freelist have not been changed
                  * 3. If they were not changed replace tid and freelist
                  *
-                 * Since this is without lock semantics the protection is only against
-                 * code executing on this cpu *not* from access by other cpus.
+                 * Since this is without lock semantics the protection is only
+                 * against code executing on this cpu *not* from access by
+                 * other cpus.
                  */
                 if (unlikely(!this_cpu_cmpxchg_double(
                                 s->cpu_slab->freelist, s->cpu_slab->tid,
@@ -2420,7 +2432,8 @@ void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
         void *ret = slab_alloc(s, gfpflags, _RET_IP_);
 
-        trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, s->size, gfpflags);
+        trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
+                                s->size, gfpflags);
 
         return ret;
 }
@@ -2434,14 +2447,6 @@ void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
         return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_trace);
-
-void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
-{
-        void *ret = kmalloc_order(size, flags, order);
-        trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
-        return ret;
-}
-EXPORT_SYMBOL(kmalloc_order_trace);
 #endif
 
 #ifdef CONFIG_NUMA
@@ -2512,8 +2517,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
                         if (kmem_cache_has_cpu_partial(s) && !prior)
 
                                 /*
-                                 * Slab was on no list before and will be partially empty
-                                 * We can defer the list move and instead freeze it.
+                                 * Slab was on no list before and will be
+                                 * partially empty
+                                 * We can defer the list move and instead
+                                 * freeze it.
                                  */
                                 new.frozen = 1;
 
@@ -3071,8 +3078,8 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
          * A) The number of objects from per cpu partial slabs dumped to the
          *    per node list when we reach the limit.
          * B) The number of objects in cpu partial slabs to extract from the
-         *    per node list when we run out of per cpu objects. We only fetch 50%
-         *    to keep some capacity around for frees.
+         *    per node list when we run out of per cpu objects. We only fetch
+         *    50% to keep some capacity around for frees.
          */
         if (!kmem_cache_has_cpu_partial(s))
                 s->cpu_partial = 0;
@@ -3099,8 +3106,8 @@ error:
         if (flags & SLAB_PANIC)
                 panic("Cannot create slab %s size=%lu realsize=%u "
                         "order=%u offset=%u flags=%lx\n",
-                        s->name, (unsigned long)s->size, s->size, oo_order(s->oo),
-                        s->offset, flags);
+                        s->name, (unsigned long)s->size, s->size,
+                        oo_order(s->oo), s->offset, flags);
         return -EINVAL;
 }
 
@@ -3316,42 +3323,6 @@ size_t ksize(const void *object)
 }
 EXPORT_SYMBOL(ksize);
 
-#ifdef CONFIG_SLUB_DEBUG
-bool verify_mem_not_deleted(const void *x)
-{
-        struct page *page;
-        void *object = (void *)x;
-        unsigned long flags;
-        bool rv;
-
-        if (unlikely(ZERO_OR_NULL_PTR(x)))
-                return false;
-
-        local_irq_save(flags);
-
-        page = virt_to_head_page(x);
-        if (unlikely(!PageSlab(page))) {
-                /* maybe it was from stack? */
-                rv = true;
-                goto out_unlock;
-        }
-
-        slab_lock(page);
-        if (on_freelist(page->slab_cache, page, object)) {
-                object_err(page->slab_cache, page, object, "Object is on free-list");
-                rv = false;
-        } else {
-                rv = true;
-        }
-        slab_unlock(page);
-
-out_unlock:
-        local_irq_restore(flags);
-        return rv;
-}
-EXPORT_SYMBOL(verify_mem_not_deleted);
-#endif
-
 void kfree(const void *x)
 {
         struct page *page;
@@ -4162,15 +4133,17 @@ static int list_locations(struct kmem_cache *s, char *buf,
                                 !cpumask_empty(to_cpumask(l->cpus)) &&
                                 len < PAGE_SIZE - 60) {
                         len += sprintf(buf + len, " cpus=");
-                        len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
+                        len += cpulist_scnprintf(buf + len,
+                                                 PAGE_SIZE - len - 50,
                                                  to_cpumask(l->cpus));
                 }
 
                 if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
                                 len < PAGE_SIZE - 60) {
                         len += sprintf(buf + len, " nodes=");
-                        len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
-                                                  l->nodes);
+                        len += nodelist_scnprintf(buf + len,
+                                                  PAGE_SIZE - len - 50,
+                                                  l->nodes);
                 }
 
                 len += sprintf(buf + len, "\n");
@@ -4268,18 +4241,17 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
         int node;
         int x;
         unsigned long *nodes;
-        unsigned long *per_cpu;
 
-        nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
+        nodes = kzalloc(sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
         if (!nodes)
                 return -ENOMEM;
-        per_cpu = nodes + nr_node_ids;
 
         if (flags & SO_CPU) {
                 int cpu;
 
                 for_each_possible_cpu(cpu) {
-                        struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
+                        struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
+                                                               cpu);
                         int node;
                         struct page *page;
 
@@ -4304,8 +4276,6 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
                                 total += x;
                                 nodes[node] += x;
                         }
-
-                        per_cpu[node]++;
                 }
         }
 
@@ -4315,12 +4285,11 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
                 for_each_node_state(node, N_NORMAL_MEMORY) {
                         struct kmem_cache_node *n = get_node(s, node);
 
                         if (flags & SO_TOTAL)
                                 x = atomic_long_read(&n->total_objects);
                         else if (flags & SO_OBJECTS)
                                 x = atomic_long_read(&n->total_objects) -
                                         count_partial(n, count_free);
-
                         else
                                 x = atomic_long_read(&n->nr_slabs);
                         total += x;
@@ -5136,7 +5105,8 @@ static char *create_unique_id(struct kmem_cache *s)
 
 #ifdef CONFIG_MEMCG_KMEM
         if (!is_root_cache(s))
-                p += sprintf(p, "-%08d", memcg_cache_id(s->memcg_params->memcg));
+                p += sprintf(p, "-%08d",
+                                memcg_cache_id(s->memcg_params->memcg));
 #endif
 
         BUG_ON(p > name + ID_STR_LENGTH - 1);