author     Linus Torvalds <torvalds@linux-foundation.org>   2013-07-14 18:14:29 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2013-07-14 18:14:29 -0400
commit     54be8200198ddfc6cb396720460c19881fac2d5a
tree       58ccab6e0cfb35b30e7e16804f15fe9c94628f12
parent     41d9884c44237cd66e2bdbc412028b29196b344c
parent     c25f195e828f847735c7626b5693ddc3b853d245
Merge branch 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux
Pull slab update from Pekka Enberg:
 "Highlights:

  - Fix for boot-time problems on some architectures due to
    init_lock_keys() not respecting kmalloc_caches boundaries
    (Christoph Lameter)

  - CONFIG_SLUB_CPU_PARTIAL requested by RT folks (Joonsoo Kim)

  - Fix for excessive slab freelist draining (Wanpeng Li)

  - SLUB and SLOB cleanups and fixes (various people)"

I ended up editing the branch, and this avoids two commits at the end
that were immediately reverted, and I instead just applied the oneliner
fix in between myself.

* 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux:
  slub: Check for page NULL before doing the node_match check
  mm/slab: Give s_next and s_stop slab-specific names
  slob: Check for NULL pointer before calling ctor()
  slub: Make cpu partial slab support configurable
  slab: add kmalloc() to kernel API documentation
  slab: fix init_lock_keys
  slob: use DIV_ROUND_UP where possible
  slub: do not put a slab to cpu partial list when cpu_partial is 0
  mm/slub: Use node_nr_slabs and node_nr_objs in get_slabinfo
  mm/slub: Drop unnecessary nr_partials
  mm/slab: Fix /proc/slabinfo unwriteable for slab
  mm/slab: Sharing s_next and s_stop between slab and slub
  mm/slab: Fix drain freelist excessively
  slob: Rework #ifdeffery in slab.h
  mm, slab: moved kmem_cache_alloc_node comment to correct place
-rw-r--r--   include/linux/slab.h       57
-rw-r--r--   include/linux/slob_def.h    8
-rw-r--r--   init/Kconfig               11
-rw-r--r--   mm/slab.c                  51
-rw-r--r--   mm/slab.h                   3
-rw-r--r--   mm/slab_common.c           18
-rw-r--r--   mm/slob.c                   4
-rw-r--r--   mm/slub.c                  38
8 files changed, 121 insertions(+), 69 deletions(-)
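
The headline change in this pull is the new CONFIG_SLUB_CPU_PARTIAL option: the mm/slub.c hunks below route every per-cpu partial decision through a single kmem_cache_has_cpu_partial() helper and make put_cpu_partial() bail out when a cache's cpu_partial count is zero. The following is a minimal userspace sketch of that gating logic, condensed from those hunks; the struct layout and the main() driver are illustrative stand-ins, not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Toggle this define to model building with or without CONFIG_SLUB_CPU_PARTIAL. */
#define CONFIG_SLUB_CPU_PARTIAL 1

struct kmem_cache {                /* illustrative stand-in, not the real struct */
        unsigned int cpu_partial;  /* max objects kept on the per-cpu partial list */
        bool debug;                /* stands in for kmem_cache_debug() */
};

static bool kmem_cache_has_cpu_partial(const struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
        return !s->debug;          /* debug caches never use per-cpu partials */
#else
        return false;              /* option off: the per-cpu partial paths are compiled out */
#endif
}

/* Mirrors the new early return in put_cpu_partial(): with cpu_partial == 0
 * (set at cache creation when the gate is off), frozen slabs are never
 * parked on the per-cpu partial list. */
static bool would_put_on_cpu_partial(const struct kmem_cache *s)
{
        return kmem_cache_has_cpu_partial(s) && s->cpu_partial != 0;
}

int main(void)
{
        struct kmem_cache normal = { .cpu_partial = 30, .debug = false };
        struct kmem_cache debug  = { .cpu_partial = 0,  .debug = true  };

        printf("normal cache uses cpu partials: %d\n", would_put_on_cpu_partial(&normal));
        printf("debug cache uses cpu partials:  %d\n", would_put_on_cpu_partial(&debug));
        return 0;
}
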
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 0c621752caa6..6c5cc0ea8713 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -169,11 +169,7 @@ struct kmem_cache {
         struct list_head list;  /* List of all slab caches on the system */
 };
 
-#define KMALLOC_MAX_SIZE (1UL << 30)
-
-#include <linux/slob_def.h>
-
-#else /* CONFIG_SLOB */
+#endif /* CONFIG_SLOB */
 
 /*
  * Kmalloc array related definitions
@@ -195,7 +191,9 @@ struct kmem_cache {
 #ifndef KMALLOC_SHIFT_LOW
 #define KMALLOC_SHIFT_LOW       5
 #endif
-#else
+#endif
+
+#ifdef CONFIG_SLUB
 /*
  * SLUB allocates up to order 2 pages directly and otherwise
  * passes the request to the page allocator.
@@ -207,6 +205,19 @@ struct kmem_cache {
 #endif
 #endif
 
+#ifdef CONFIG_SLOB
+/*
+ * SLOB passes all page size and larger requests to the page allocator.
+ * No kmalloc array is necessary since objects of different sizes can
+ * be allocated from the same page.
+ */
+#define KMALLOC_SHIFT_MAX       30
+#define KMALLOC_SHIFT_HIGH      PAGE_SHIFT
+#ifndef KMALLOC_SHIFT_LOW
+#define KMALLOC_SHIFT_LOW       3
+#endif
+#endif
+
 /* Maximum allocatable size */
 #define KMALLOC_MAX_SIZE        (1UL << KMALLOC_SHIFT_MAX)
 /* Maximum size for which we actually use a slab cache */
@@ -221,6 +232,7 @@ struct kmem_cache {
 #define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
 #endif
 
+#ifndef CONFIG_SLOB
 extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
 #ifdef CONFIG_ZONE_DMA
 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
@@ -275,13 +287,18 @@ static __always_inline int kmalloc_index(size_t size)
         /* Will never be reached. Needed because the compiler may complain */
         return -1;
 }
+#endif /* !CONFIG_SLOB */
 
 #ifdef CONFIG_SLAB
 #include <linux/slab_def.h>
-#elif defined(CONFIG_SLUB)
+#endif
+
+#ifdef CONFIG_SLUB
 #include <linux/slub_def.h>
-#else
-#error "Unknown slab allocator"
+#endif
+
+#ifdef CONFIG_SLOB
+#include <linux/slob_def.h>
 #endif
 
 /*
@@ -291,6 +308,7 @@ static __always_inline int kmalloc_index(size_t size)
  */
 static __always_inline int kmalloc_size(int n)
 {
+#ifndef CONFIG_SLOB
         if (n > 2)
                 return 1 << n;
 
@@ -299,10 +317,9 @@ static __always_inline int kmalloc_size(int n)
 
         if (n == 2 && KMALLOC_MIN_SIZE <= 64)
                 return 192;
-
+#endif
         return 0;
 }
-#endif /* !CONFIG_SLOB */
 
 /*
  * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
@@ -356,9 +373,8 @@ int cache_show(struct kmem_cache *s, struct seq_file *m);
 void print_slabinfo_header(struct seq_file *m);
 
 /**
- * kmalloc_array - allocate memory for an array.
- * @n: number of elements.
- * @size: element size.
+ * kmalloc - allocate memory
+ * @size: how many bytes of memory are required.
  * @flags: the type of memory to allocate.
  *
  * The @flags argument may be one of:
@@ -405,6 +421,17 @@ void print_slabinfo_header(struct seq_file *m);
  * There are other flags available as well, but these are not intended
  * for general use, and so are not documented here. For a full list of
  * potential flags, always refer to linux/gfp.h.
+ *
+ * kmalloc is the normal method of allocating memory
+ * in the kernel.
+ */
+static __always_inline void *kmalloc(size_t size, gfp_t flags);
+
+/**
+ * kmalloc_array - allocate memory for an array.
+ * @n: number of elements.
+ * @size: element size.
+ * @flags: the type of memory to allocate (see kmalloc).
  */
 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
 {
@@ -428,7 +455,7 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
 /**
  * kmalloc_node - allocate memory from a specific node
  * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate (see kcalloc).
+ * @flags: the type of memory to allocate (see kmalloc).
  * @node: node to allocate from.
  *
  * kmalloc() for non-local nodes, used to allocate from a specific node
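
For reference, the kerneldoc being consolidated above describes the ordinary allocation entry points. Below is a hypothetical caller showing the documented usage of kmalloc(), kmalloc_array() and kfree(); struct foo and example_alloc() are made-up names, not part of this patch.

#include <linux/errno.h>
#include <linux/slab.h>

struct foo {
        int x;
};

/* Hypothetical helper, shown only to illustrate the documented API. */
static int example_alloc(unsigned int nr)
{
        struct foo *one, *many;

        one = kmalloc(sizeof(*one), GFP_KERNEL);
        if (!one)
                return -ENOMEM;

        /* kmalloc_array() also checks that nr * sizeof(*many) does not overflow. */
        many = kmalloc_array(nr, sizeof(*many), GFP_KERNEL);
        if (!many) {
                kfree(one);
                return -ENOMEM;
        }

        kfree(many);
        kfree(one);
        return 0;
}
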
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
index f28e14a12e3f..095a5a4a8516 100644
--- a/include/linux/slob_def.h
+++ b/include/linux/slob_def.h
@@ -18,14 +18,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
         return __kmalloc_node(size, flags, node);
 }
 
-/**
- * kmalloc - allocate memory
- * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate (see kcalloc).
- *
- * kmalloc is the normal method of allocating memory
- * in the kernel.
- */
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
         return __kmalloc_node(size, flags, NUMA_NO_NODE);
diff --git a/init/Kconfig b/init/Kconfig
index 54d3fa5ae723..247084be0590 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1596,6 +1596,17 @@ config SLOB
 
 endchoice
 
+config SLUB_CPU_PARTIAL
+        default y
+        depends on SLUB
+        bool "SLUB per cpu partial cache"
+        help
+          Per cpu partial caches accellerate objects allocation and freeing
+          that is local to a processor at the price of more indeterminism
+          in the latency of the free. On overflow these caches will be cleared
+          which requires the taking of locks that may cause latency spikes.
+          Typically one would choose no for a realtime system.
+
 config MMAP_ALLOW_UNINITIALIZED
         bool "Allow mmapped anonymous memory to be uninitialized"
         depends on EXPERT && !MMU
diff --git a/mm/slab.c b/mm/slab.c
index 8ccd296c6d9c..35cb0c861508 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -565,7 +565,7 @@ static void init_node_lock_keys(int q)
         if (slab_state < UP)
                 return;
 
-        for (i = 1; i < PAGE_SHIFT + MAX_ORDER; i++) {
+        for (i = 1; i <= KMALLOC_SHIFT_HIGH; i++) {
                 struct kmem_cache_node *n;
                 struct kmem_cache *cache = kmalloc_caches[i];
 
@@ -1180,6 +1180,12 @@ static int init_cache_node_node(int node)
         return 0;
 }
 
+static inline int slabs_tofree(struct kmem_cache *cachep,
+                                                struct kmem_cache_node *n)
+{
+        return (n->free_objects + cachep->num - 1) / cachep->num;
+}
+
 static void __cpuinit cpuup_canceled(long cpu)
 {
         struct kmem_cache *cachep;
@@ -1241,7 +1247,7 @@ free_array_cache:
                 n = cachep->node[node];
                 if (!n)
                         continue;
-                drain_freelist(cachep, n, n->free_objects);
+                drain_freelist(cachep, n, slabs_tofree(cachep, n));
         }
 }
 
@@ -1408,7 +1414,7 @@ static int __meminit drain_cache_node_node(int node)
                 if (!n)
                         continue;
 
-                drain_freelist(cachep, n, n->free_objects);
+                drain_freelist(cachep, n, slabs_tofree(cachep, n));
 
                 if (!list_empty(&n->slabs_full) ||
                     !list_empty(&n->slabs_partial)) {
@@ -2532,7 +2538,7 @@ static int __cache_shrink(struct kmem_cache *cachep)
                 if (!n)
                         continue;
 
-                drain_freelist(cachep, n, n->free_objects);
+                drain_freelist(cachep, n, slabs_tofree(cachep, n));
 
                 ret += !list_empty(&n->slabs_full) ||
                         !list_empty(&n->slabs_partial);
@@ -3338,18 +3344,6 @@ done:
         return obj;
 }
 
-/**
- * kmem_cache_alloc_node - Allocate an object on the specified node
- * @cachep: The cache to allocate from.
- * @flags: See kmalloc().
- * @nodeid: node number of the target node.
- * @caller: return address of caller, used for debug information
- *
- * Identical to kmem_cache_alloc but it will allocate memory on the given
- * node, which can improve the performance for cpu bound structures.
- *
- * Fallback to other node is possible if __GFP_THISNODE is not set.
- */
 static __always_inline void *
 slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
                    unsigned long caller)
@@ -3643,6 +3637,17 @@ EXPORT_SYMBOL(kmem_cache_alloc_trace);
 #endif
 
 #ifdef CONFIG_NUMA
+/**
+ * kmem_cache_alloc_node - Allocate an object on the specified node
+ * @cachep: The cache to allocate from.
+ * @flags: See kmalloc().
+ * @nodeid: node number of the target node.
+ *
+ * Identical to kmem_cache_alloc but it will allocate memory on the given
+ * node, which can improve the performance for cpu bound structures.
+ *
+ * Fallback to other node is possible if __GFP_THISNODE is not set.
+ */
 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
         void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
@@ -4431,20 +4436,10 @@ static int leaks_show(struct seq_file *m, void *p)
         return 0;
 }
 
-static void *s_next(struct seq_file *m, void *p, loff_t *pos)
-{
-        return seq_list_next(p, &slab_caches, pos);
-}
-
-static void s_stop(struct seq_file *m, void *p)
-{
-        mutex_unlock(&slab_mutex);
-}
-
 static const struct seq_operations slabstats_op = {
         .start = leaks_start,
-        .next = s_next,
-        .stop = s_stop,
+        .next = slab_next,
+        .stop = slab_stop,
         .show = leaks_show,
 };
 
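
The drain_freelist() callers changed above used to pass n->free_objects, which counts free objects, where the function expects a count of slabs to scan; the new slabs_tofree() helper converts free objects into whole slabs by rounding up. A small standalone illustration of that arithmetic (the numbers are made up):

#include <stdio.h>

/* Same rounding as slabs_tofree(): ceil(free_objects / objs_per_slab). */
static int slabs_tofree(int free_objects, int objs_per_slab)
{
        return (free_objects + objs_per_slab - 1) / objs_per_slab;
}

int main(void)
{
        /* e.g. 10 free objects in a cache that packs 4 objects per slab
         * means at most 3 slabs can possibly be drained, not 10. */
        printf("%d\n", slabs_tofree(10, 4));   /* 3 */
        printf("%d\n", slabs_tofree(64, 32));  /* 2 */
        return 0;
}
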
diff --git a/mm/slab.h b/mm/slab.h
index f96b49e4704e..620ceeddbe1a 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -271,3 +271,6 @@ struct kmem_cache_node {
 #endif
 
 };
+
+void *slab_next(struct seq_file *m, void *p, loff_t *pos);
+void slab_stop(struct seq_file *m, void *p);
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 2d414508e9ec..538bade6df7d 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -497,6 +497,13 @@ void __init create_kmalloc_caches(unsigned long flags)
 
 
 #ifdef CONFIG_SLABINFO
+
+#ifdef CONFIG_SLAB
+#define SLABINFO_RIGHTS (S_IWUSR | S_IRUSR)
+#else
+#define SLABINFO_RIGHTS S_IRUSR
+#endif
+
 void print_slabinfo_header(struct seq_file *m)
 {
         /*
@@ -531,12 +538,12 @@ static void *s_start(struct seq_file *m, loff_t *pos)
         return seq_list_start(&slab_caches, *pos);
 }
 
-static void *s_next(struct seq_file *m, void *p, loff_t *pos)
+void *slab_next(struct seq_file *m, void *p, loff_t *pos)
 {
         return seq_list_next(p, &slab_caches, pos);
 }
 
-static void s_stop(struct seq_file *m, void *p)
+void slab_stop(struct seq_file *m, void *p)
 {
         mutex_unlock(&slab_mutex);
 }
@@ -613,8 +620,8 @@ static int s_show(struct seq_file *m, void *p)
  */
 static const struct seq_operations slabinfo_op = {
         .start = s_start,
-        .next = s_next,
-        .stop = s_stop,
+        .next = slab_next,
+        .stop = slab_stop,
         .show = s_show,
 };
 
@@ -633,7 +640,8 @@ static const struct file_operations proc_slabinfo_operations = {
 
 static int __init slab_proc_init(void)
 {
-        proc_create("slabinfo", S_IRUSR, NULL, &proc_slabinfo_operations);
+        proc_create("slabinfo", SLABINFO_RIGHTS, NULL,
+                                                &proc_slabinfo_operations);
         return 0;
 }
 module_init(slab_proc_init);
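
SLABINFO_RIGHTS makes /proc/slabinfo owner-writable only when SLAB is the allocator, which, as the "Fix /proc/slabinfo unwriteable for slab" patch in this series implies, is the configuration that accepts tuning input through that file; everywhere else it stays read-only. A quick standalone check of what the two macro expansions amount to in octal (illustrative only):

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
        /* What SLABINFO_RIGHTS expands to in each configuration. */
        printf("CONFIG_SLAB: %04o\n", (unsigned)(S_IWUSR | S_IRUSR)); /* 0600: owner may write tunables */
        printf("otherwise:   %04o\n", (unsigned)S_IRUSR);             /* 0400: read-only */
        return 0;
}
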
diff --git a/mm/slob.c b/mm/slob.c
index eeed4a05a2ef..91bd3f2dd2f0 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -122,7 +122,7 @@ static inline void clear_slob_page_free(struct page *sp)
 }
 
 #define SLOB_UNIT sizeof(slob_t)
-#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
+#define SLOB_UNITS(size) DIV_ROUND_UP(size, SLOB_UNIT)
 
 /*
  * struct slob_rcu is inserted at the tail of allocated slob blocks, which
@@ -554,7 +554,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
                                                 flags, node);
         }
 
-        if (c->ctor)
+        if (b && c->ctor)
                 c->ctor(b);
 
         kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
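
The second mm/slob.c hunk guards the constructor call: if the allocation just above it failed and b is NULL, calling c->ctor(b) would hand a NULL object to the constructor. A minimal userspace model of the fixed control flow (struct cache, cache_alloc() and init_obj() are illustrative stand-ins, not SLOB code):

#include <stdio.h>
#include <stdlib.h>

struct cache {
        size_t size;
        void (*ctor)(void *);  /* optional constructor, as in struct kmem_cache */
};

static void init_obj(void *obj)
{
        /* A real ctor writes to the object; with obj == NULL this would crash. */
        *(int *)obj = 42;
}

static void *cache_alloc(struct cache *c, int simulate_failure)
{
        void *b = simulate_failure ? NULL : malloc(c->size);

        if (b && c->ctor)  /* the fix: only construct when the allocation succeeded */
                c->ctor(b);
        return b;
}

int main(void)
{
        struct cache c = { .size = sizeof(int), .ctor = init_obj };

        free(cache_alloc(&c, 0));            /* constructed object, then freed */
        printf("%p\n", cache_alloc(&c, 1));  /* (nil): ctor skipped, no crash */
        return 0;
}
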
diff --git a/mm/slub.c b/mm/slub.c
index 57707f01bcfb..3b482c863002 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -123,6 +123,15 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
 #endif
 }
 
+static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
+{
+#ifdef CONFIG_SLUB_CPU_PARTIAL
+        return !kmem_cache_debug(s);
+#else
+        return false;
+#endif
+}
+
 /*
  * Issues still to be resolved:
  *
@@ -1573,7 +1582,8 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
                         put_cpu_partial(s, page, 0);
                         stat(s, CPU_PARTIAL_NODE);
                 }
-                if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
+                if (!kmem_cache_has_cpu_partial(s)
+                        || available > s->cpu_partial / 2)
                         break;
 
         }
@@ -1884,6 +1894,7 @@ redo:
 static void unfreeze_partials(struct kmem_cache *s,
                 struct kmem_cache_cpu *c)
 {
+#ifdef CONFIG_SLUB_CPU_PARTIAL
         struct kmem_cache_node *n = NULL, *n2 = NULL;
         struct page *page, *discard_page = NULL;
 
@@ -1938,6 +1949,7 @@ static void unfreeze_partials(struct kmem_cache *s,
                 discard_slab(s, page);
                 stat(s, FREE_SLAB);
         }
+#endif
 }
 
 /*
@@ -1951,10 +1963,14 @@ static void unfreeze_partials(struct kmem_cache *s,
  */
 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 {
+#ifdef CONFIG_SLUB_CPU_PARTIAL
         struct page *oldpage;
         int pages;
         int pobjects;
 
+        if (!s->cpu_partial)
+                return;
+
         do {
                 pages = 0;
                 pobjects = 0;
@@ -1987,6 +2003,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
                 page->next = oldpage;
 
         } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
+#endif
 }
 
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
@@ -2358,7 +2375,7 @@ redo:
 
         object = c->freelist;
         page = c->page;
-        if (unlikely(!object || !node_match(page, node)))
+        if (unlikely(!object || !page || !node_match(page, node)))
                 object = __slab_alloc(s, gfpflags, node, addr, c);
 
         else {
@@ -2495,7 +2512,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
                 new.inuse--;
                 if ((!new.inuse || !prior) && !was_frozen) {
 
-                        if (!kmem_cache_debug(s) && !prior)
+                        if (kmem_cache_has_cpu_partial(s) && !prior)
 
                                 /*
                                  * Slab was on no list before and will be partially empty
@@ -2550,8 +2567,9 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
          * Objects left in the slab. If it was not on the partial list before
          * then add it.
          */
-        if (kmem_cache_debug(s) && unlikely(!prior)) {
-                remove_full(s, page);
+        if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
+                if (kmem_cache_debug(s))
+                        remove_full(s, page);
                 add_partial(n, page, DEACTIVATE_TO_TAIL);
                 stat(s, FREE_ADD_PARTIAL);
         }
@@ -3059,7 +3077,7 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
          * per node list when we run out of per cpu objects. We only fetch 50%
          * to keep some capacity around for frees.
          */
-        if (kmem_cache_debug(s))
+        if (!kmem_cache_has_cpu_partial(s))
                 s->cpu_partial = 0;
         else if (s->size >= PAGE_SIZE)
                 s->cpu_partial = 2;
@@ -4456,7 +4474,7 @@ static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
         err = strict_strtoul(buf, 10, &objects);
         if (err)
                 return err;
-        if (objects && kmem_cache_debug(s))
+        if (objects && !kmem_cache_has_cpu_partial(s))
                 return -EINVAL;
 
         s->cpu_partial = objects;
@@ -5269,7 +5287,6 @@ __initcall(slab_sysfs_init);
 #ifdef CONFIG_SLABINFO
 void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
 {
-        unsigned long nr_partials = 0;
         unsigned long nr_slabs = 0;
         unsigned long nr_objs = 0;
         unsigned long nr_free = 0;
@@ -5281,9 +5298,8 @@ void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
                 if (!n)
                         continue;
 
-                nr_partials += n->nr_partial;
-                nr_slabs += atomic_long_read(&n->nr_slabs);
-                nr_objs += atomic_long_read(&n->total_objects);
+                nr_slabs += node_nr_slabs(n);
+                nr_objs += node_nr_objs(n);
                 nr_free += count_partial(n, count_free);
         }
 