Diffstat (limited to 'mm/slub.c')
 mm/slub.c | 38 +++++++++++++++++++++++++++-----------
 1 file changed, 27 insertions(+), 11 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 57707f01bcfb..3b482c863002 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -123,6 +123,15 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
 #endif
 }
 
+static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
+{
+#ifdef CONFIG_SLUB_CPU_PARTIAL
+	return !kmem_cache_debug(s);
+#else
+	return false;
+#endif
+}
+
 /*
  * Issues still to be resolved:
  *
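
This helper is the heart of the change: every open-coded kmem_cache_debug() test on the cpu-partial paths is rerouted through one predicate that constant-folds to false when CONFIG_SLUB_CPU_PARTIAL is compiled out, letting the compiler drop the dead branches. A minimal user-space sketch of the same gating pattern, not part of the patch (CONFIG_FEATURE_X and the cache type are hypothetical names):

	#include <stdbool.h>
	#include <stdio.h>

	#define CONFIG_FEATURE_X 1	/* remove to simulate the Kconfig switch off */

	struct cache { bool debug; };

	/* Mirrors kmem_cache_has_cpu_partial(): one predicate, resolved
	 * at compile time when the feature is configured out. */
	static inline bool cache_has_feature_x(const struct cache *c)
	{
	#ifdef CONFIG_FEATURE_X
		return !c->debug;	/* available unless debugging is active */
	#else
		return false;		/* configured out: callers' branches are dead code */
	#endif
	}

	int main(void)
	{
		struct cache plain = { .debug = false };
		struct cache dbg = { .debug = true };

		printf("plain cache: %d\n", cache_has_feature_x(&plain));
		printf("debug cache: %d\n", cache_has_feature_x(&dbg));	/* always 0 */
		return 0;
	}
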
@@ -1573,7 +1582,8 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 			put_cpu_partial(s, page, 0);
 			stat(s, CPU_PARTIAL_NODE);
 		}
-		if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
+		if (!kmem_cache_has_cpu_partial(s)
+			|| available > s->cpu_partial / 2)
 			break;
 
 	}
@@ -1884,6 +1894,7 @@ redo:
 static void unfreeze_partials(struct kmem_cache *s,
 		struct kmem_cache_cpu *c)
 {
+#ifdef CONFIG_SLUB_CPU_PARTIAL
 	struct kmem_cache_node *n = NULL, *n2 = NULL;
 	struct page *page, *discard_page = NULL;
 
@@ -1938,6 +1949,7 @@ static void unfreeze_partials(struct kmem_cache *s,
 		discard_slab(s, page);
 		stat(s, FREE_SLAB);
 	}
+#endif
 }
 
 /*
@@ -1951,10 +1963,14 @@ static void unfreeze_partials(struct kmem_cache *s,
  */
 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 {
+#ifdef CONFIG_SLUB_CPU_PARTIAL
 	struct page *oldpage;
 	int pages;
 	int pobjects;
 
+	if (!s->cpu_partial)
+		return;
+
 	do {
 		pages = 0;
 		pobjects = 0;
@@ -1987,6 +2003,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 		page->next = oldpage;
 
 	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
+#endif
 }
 
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
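
Note the shape of the loop the new #ifdef now brackets: put_cpu_partial() publishes a page by linking it in front of the current per-cpu partial head, then retries with this_cpu_cmpxchg() if another context moved the head underneath it. A simplified user-space sketch of that lock-free push, with C11 atomics standing in for the kernel's per-cpu primitives (all names hypothetical):

	#include <stdatomic.h>
	#include <stddef.h>

	struct pnode {
		struct pnode *next;
		int pobjects;		/* stand-in for the cached object count */
	};

	static _Atomic(struct pnode *) partial_head;

	/* Retry until the compare-and-swap observes the head we linked against. */
	static void push_partial(struct pnode *n)
	{
		struct pnode *old;

		do {
			old = atomic_load(&partial_head);
			n->next = old;	/* link ahead of the current head */
		} while (!atomic_compare_exchange_weak(&partial_head, &old, n));
	}
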
@@ -2358,7 +2375,7 @@ redo:
 
 	object = c->freelist;
 	page = c->page;
-	if (unlikely(!object || !node_match(page, node)))
+	if (unlikely(!object || !page || !node_match(page, node)))
 		object = __slab_alloc(s, gfpflags, node, addr, c);
 
 	else {
@@ -2495,7 +2512,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		new.inuse--;
 		if ((!new.inuse || !prior) && !was_frozen) {
 
-			if (!kmem_cache_debug(s) && !prior)
+			if (kmem_cache_has_cpu_partial(s) && !prior)
 
 				/*
 				 * Slab was on no list before and will be partially empty
@@ -2550,8 +2567,9 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	 * Objects left in the slab. If it was not on the partial list before
 	 * then add it.
 	 */
-	if (kmem_cache_debug(s) && unlikely(!prior)) {
-		remove_full(s, page);
+	if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
+		if (kmem_cache_debug(s))
+			remove_full(s, page);
 		add_partial(n, page, DEACTIVATE_TO_TAIL);
 		stat(s, FREE_ADD_PARTIAL);
 	}
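
Read together with the earlier __slab_free() hunk, the free-path policy becomes a small decision table: a slab that was full (prior == NULL) and still holds live objects must become reachable again, either as a frozen cpu partial or on the node partial list. A side-effect-free condensation of that policy (hypothetical enum and names, not kernel code):

	#include <stdbool.h>

	enum slab_dest { DEST_NONE, DEST_CPU_PARTIAL, DEST_NODE_PARTIAL };

	/* Condensed from the two __slab_free() hunks: where does a slab
	 * with remaining objects go after a free? */
	static enum slab_dest freed_slab_dest(bool has_cpu_partial, bool was_on_partial)
	{
		if (was_on_partial)
			return DEST_NONE;		/* already reachable; nothing to move */
		if (has_cpu_partial)
			return DEST_CPU_PARTIAL;	/* freeze and put_cpu_partial() */
		return DEST_NODE_PARTIAL;		/* add_partial(..., DEACTIVATE_TO_TAIL) */
	}
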
@@ -3059,7 +3077,7 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
 	 * per node list when we run out of per cpu objects. We only fetch 50%
 	 * to keep some capacity around for frees.
 	 */
-	if (kmem_cache_debug(s))
+	if (!kmem_cache_has_cpu_partial(s))
 		s->cpu_partial = 0;
 	else if (s->size >= PAGE_SIZE)
 		s->cpu_partial = 2;
@@ -4456,7 +4474,7 @@ static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
 	err = strict_strtoul(buf, 10, &objects);
 	if (err)
 		return err;
-	if (objects && kmem_cache_debug(s))
+	if (objects && !kmem_cache_has_cpu_partial(s))
 		return -EINVAL;
 
 	s->cpu_partial = objects;
@@ -5269,7 +5287,6 @@ __initcall(slab_sysfs_init);
 #ifdef CONFIG_SLABINFO
 void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
 {
-	unsigned long nr_partials = 0;
 	unsigned long nr_slabs = 0;
 	unsigned long nr_objs = 0;
 	unsigned long nr_free = 0;
@@ -5281,9 +5298,8 @@ void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
 		if (!n)
 			continue;
 
-		nr_partials += n->nr_partial;
-		nr_slabs += atomic_long_read(&n->nr_slabs);
-		nr_objs += atomic_long_read(&n->total_objects);
+		nr_slabs += node_nr_slabs(n);
+		nr_objs += node_nr_objs(n);
 		nr_free += count_partial(n, count_free);
 	}
 
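
Finally, the cpu_partial_store() hunk makes the sysfs knob honest: a non-zero value is rejected whenever the cache cannot use cpu partials, so only 0 is accepted there. A compressed user-space sketch of that validation (hypothetical names; strtoul stands in for the kernel's strict_strtoul):

	#include <errno.h>
	#include <stdbool.h>
	#include <stdlib.h>

	struct cache {
		bool has_cpu_partial;
		unsigned long cpu_partial;
	};

	/* Mirrors cpu_partial_store(): parse, validate against the
	 * capability, then commit the new limit. */
	static int cache_set_cpu_partial(struct cache *c, const char *buf)
	{
		char *end;
		unsigned long objects = strtoul(buf, &end, 10);

		if (end == buf)
			return -EINVAL;		/* no digits parsed */
		if (objects && !c->has_cpu_partial)
			return -EINVAL;		/* only 0 is valid without the feature */

		c->cpu_partial = objects;
		/* the kernel follows up by flushing per-cpu slabs; omitted here */
		return 0;
	}
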