author     Christoph Lameter <cl@linux.com>    2011-08-09 17:12:27 -0400
committer  Pekka Enberg <penberg@kernel.org>   2011-08-19 12:34:27 -0400
commit     49e2258586b423684f03c278149ab46d8f8b6700
tree       d4404d1b09d6fe505da29a32602d193c4ef56ac9 /mm
parent     497b66f2ecc97844493e6a147fd5a7e73f73f408
slub: per cpu cache for partial pages
Allow filling out the rest of the kmem_cache_cpu cacheline with pointers to
partial pages. The partial page list is used in slab_free() to avoid
per node lock taking.
In __slab_alloc() we can then take multiple partial pages off the per
node partial list in one go, reducing node lock pressure.
We can also use the per cpu partial list in slab_alloc() to avoid scanning
partial lists for pages with free objects.
The main effect of a per cpu partial list is that the per node list_lock
is taken for batches of partial pages instead of individual ones.
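
To make the batching concrete, here is a small stand-alone C sketch of the idea (my
illustration, not the kernel code: cpu_cache, node_cache, cache_partial_slab and
drain_to_node are invented names, and a pthread mutex stands in for the per node
list_lock). Each CPU accumulates partial slabs on a private chain and takes the
shared node lock only once per overflowing batch:

/* Toy model of per cpu partial batching; illustrative names only. */
#include <pthread.h>
#include <stdio.h>

struct slab {                           /* stand-in for struct page */
        struct slab *next;              /* chains cpu-local partial slabs */
        int free_objects;               /* free objects left in this slab */
};

struct node_cache {                     /* stand-in for kmem_cache_node */
        pthread_mutex_t list_lock;
        struct slab *partial;           /* per node partial list */
};

struct cpu_cache {                      /* stand-in for kmem_cache_cpu */
        struct slab *partial;           /* per cpu partial slabs, no locking */
        int pobjects;                   /* free objects cached on this cpu */
        int limit;                      /* plays the role of s->cpu_partial */
};

/* Move the whole per cpu batch to the node list under one lock acquisition. */
static void drain_to_node(struct cpu_cache *c, struct node_cache *n)
{
        pthread_mutex_lock(&n->list_lock);
        while (c->partial) {
                struct slab *s = c->partial;

                c->partial = s->next;
                s->next = n->partial;
                n->partial = s;
        }
        c->pobjects = 0;
        pthread_mutex_unlock(&n->list_lock);
}

/* Free path: stash the slab locally; the lock is taken once per full batch. */
static void cache_partial_slab(struct cpu_cache *c, struct node_cache *n,
                               struct slab *s)
{
        if (c->pobjects > c->limit)
                drain_to_node(c, n);    /* one lock for the whole batch */

        s->next = c->partial;           /* common case: no lock at all */
        c->partial = s;
        c->pobjects += s->free_objects;
}

int main(void)
{
        struct node_cache node = { PTHREAD_MUTEX_INITIALIZER, NULL };
        struct cpu_cache cpu = { NULL, 0, 30 };
        static struct slab slabs[100];

        for (int i = 0; i < 100; i++) {
                slabs[i].free_objects = 1;
                cache_partial_slab(&cpu, &node, &slabs[i]);
        }
        printf("still cached on this cpu: %d objects\n", cpu.pobjects);
        drain_to_node(&cpu, &node);
        return 0;
}

The common free path touches only cpu-local data, so the cost of the node lock is
amortized over a whole batch of slabs, which is the effect described above.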
Potential future enhancements:
1. The pickup from the partial list could perhaps be done without disabling
interrupts, given some additional work. The free path already puts the page
into the per cpu partial list without disabling interrupts (see the lockless
push sketch below).
2. __slab_free() may have some code paths that could use optimization.
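
For the lockless free path mentioned in enhancement 1, here is a minimal C11 sketch
of a compare-and-swap push (again only an illustration made under assumptions: the
kernel's put_cpu_partial() uses this_cpu_cmpxchg() on a per cpu pointer, while this
sketch uses one global atomic head and invented names):

/* Lockless push of a slab onto a partial chain (C11 atomics, not kernel code). */
#include <stdatomic.h>
#include <stddef.h>

struct slab {
        struct slab *next;
};

/* The kernel keeps one such head per cpu; a single global head is enough here. */
static _Atomic(struct slab *) partial_head;

static void push_partial(struct slab *s)
{
        struct slab *old = atomic_load(&partial_head);

        do {
                s->next = old;          /* link to the chain we last observed */
        } while (!atomic_compare_exchange_weak(&partial_head, &old, s));
}

int main(void)
{
        struct slab a = { NULL }, b = { NULL };

        push_partial(&a);
        push_partial(&b);               /* b now heads the chain, b.next == &a */
        return 0;
}

Because the head is replaced atomically, concurrent pushers retry instead of
blocking, so neither a lock nor interrupt disabling is needed.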
Performance:

                                    Before        After
./hackbench 100 process 200000
        Time:                     1953.047     1564.614
./hackbench 100 process 20000
        Time:                      207.176      156.940
./hackbench 100 process 20000
        Time:                      204.468      156.940
./hackbench 100 process 20000
        Time:                      204.879      158.772
./hackbench 10 process 20000
        Time:                       20.153       15.853
./hackbench 10 process 20000
        Time:                       20.153       15.986
./hackbench 10 process 20000
        Time:                       19.363       16.111
./hackbench 1 process 20000
        Time:                        2.518        2.307
./hackbench 1 process 20000
        Time:                        2.258        2.339
./hackbench 1 process 20000
        Time:                        2.864        2.163
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'mm')
 -rw-r--r--   mm/slub.c | 339
 1 file changed, 292 insertions(+), 47 deletions(-)
@@ -1560,7 +1560,7 @@ static inline void remove_partial(struct kmem_cache_node *n,
  */
 static inline void *acquire_slab(struct kmem_cache *s,
                 struct kmem_cache_node *n, struct page *page,
-                struct kmem_cache_cpu *c)
+                int mode)
 {
         void *freelist;
         unsigned long counters;
@@ -1575,7 +1575,8 @@ static inline void *acquire_slab(struct kmem_cache *s,
                 freelist = page->freelist;
                 counters = page->counters;
                 new.counters = counters;
-                new.inuse = page->objects;
+                if (mode)
+                        new.inuse = page->objects;
 
                 VM_BUG_ON(new.frozen);
                 new.frozen = 1;
@@ -1586,34 +1587,20 @@ static inline void *acquire_slab(struct kmem_cache *s,
                         "lock and freeze"));
 
         remove_partial(n, page);
-
-        if (freelist) {
-                /* Populate the per cpu freelist */
-                c->page = page;
-                c->node = page_to_nid(page);
-                stat(s, ALLOC_FROM_PARTIAL);
-
-                return freelist;
-        } else {
-                /*
-                 * Slab page came from the wrong list. No object to allocate
-                 * from. Put it onto the correct list and continue partial
-                 * scan.
-                 */
-                printk(KERN_ERR "SLUB: %s : Page without available objects on"
-                        " partial list\n", s->name);
-                return NULL;
-        }
+        return freelist;
 }
 
+static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
+
 /*
  * Try to allocate a partial slab from a specific node.
  */
 static void *get_partial_node(struct kmem_cache *s,
                 struct kmem_cache_node *n, struct kmem_cache_cpu *c)
 {
-        struct page *page;
-        void *object;
+        struct page *page, *page2;
+        void *object = NULL;
+        int count = 0;
 
         /*
          * Racy check. If we mistakenly see no partial slabs then we
@@ -1625,13 +1612,28 @@ static void *get_partial_node(struct kmem_cache *s,
                 return NULL;
 
         spin_lock(&n->list_lock);
-        list_for_each_entry(page, &n->partial, lru) {
-                object = acquire_slab(s, n, page, c);
-                if (object)
-                        goto out;
+        list_for_each_entry_safe(page, page2, &n->partial, lru) {
+                void *t = acquire_slab(s, n, page, count == 0);
+                int available;
+
+                if (!t)
+                        break;
+
+                if (!count) {
+                        c->page = page;
+                        c->node = page_to_nid(page);
+                        stat(s, ALLOC_FROM_PARTIAL);
+                        count++;
+                        object = t;
+                        available = page->objects - page->inuse;
+                } else {
+                        page->freelist = t;
+                        available = put_cpu_partial(s, page, 0);
+                }
+                if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
+                        break;
+
         }
-        object = NULL;
-out:
         spin_unlock(&n->list_lock);
         return object;
 }
@@ -1926,6 +1928,123 @@ redo:
         }
 }
 
+/* Unfreeze all the cpu partial slabs */
+static void unfreeze_partials(struct kmem_cache *s)
+{
+        struct kmem_cache_node *n = NULL;
+        struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
+        struct page *page;
+
+        while ((page = c->partial)) {
+                enum slab_modes { M_PARTIAL, M_FREE };
+                enum slab_modes l, m;
+                struct page new;
+                struct page old;
+
+                c->partial = page->next;
+                l = M_FREE;
+
+                do {
+
+                        old.freelist = page->freelist;
+                        old.counters = page->counters;
+                        VM_BUG_ON(!old.frozen);
+
+                        new.counters = old.counters;
+                        new.freelist = old.freelist;
+
+                        new.frozen = 0;
+
+                        if (!new.inuse && (!n || n->nr_partial < s->min_partial))
+                                m = M_FREE;
+                        else {
+                                struct kmem_cache_node *n2 = get_node(s,
+                                        page_to_nid(page));
+
+                                m = M_PARTIAL;
+                                if (n != n2) {
+                                        if (n)
+                                                spin_unlock(&n->list_lock);
+
+                                        n = n2;
+                                        spin_lock(&n->list_lock);
+                                }
+                        }
+
+                        if (l != m) {
+                                if (l == M_PARTIAL)
+                                        remove_partial(n, page);
+                                else
+                                        add_partial(n, page, 1);
+
+                                l = m;
+                        }
+
+                } while (!cmpxchg_double_slab(s, page,
+                                old.freelist, old.counters,
+                                new.freelist, new.counters,
+                                "unfreezing slab"));
+
+                if (m == M_FREE) {
+                        stat(s, DEACTIVATE_EMPTY);
+                        discard_slab(s, page);
+                        stat(s, FREE_SLAB);
+                }
+        }
+
+        if (n)
+                spin_unlock(&n->list_lock);
+}
+
+/*
+ * Put a page that was just frozen (in __slab_free) into a partial page
+ * slot if available. This is done without interrupts disabled and without
+ * preemption disabled. The cmpxchg is racy and may put the partial page
+ * onto a random cpus partial slot.
+ *
+ * If we did not find a slot then simply move all the partials to the
+ * per node partial list.
+ */
+int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
+{
+        struct page *oldpage;
+        int pages;
+        int pobjects;
+
+        do {
+                pages = 0;
+                pobjects = 0;
+                oldpage = this_cpu_read(s->cpu_slab->partial);
+
+                if (oldpage) {
+                        pobjects = oldpage->pobjects;
+                        pages = oldpage->pages;
+                        if (drain && pobjects > s->cpu_partial) {
+                                unsigned long flags;
+                                /*
+                                 * partial array is full. Move the existing
+                                 * set to the per node partial list.
+                                 */
+                                local_irq_save(flags);
+                                unfreeze_partials(s);
+                                local_irq_restore(flags);
+                                pobjects = 0;
+                                pages = 0;
+                        }
+                }
+
+                pages++;
+                pobjects += page->objects - page->inuse;
+
+                page->pages = pages;
+                page->pobjects = pobjects;
+                page->next = oldpage;
+
+        } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
+        stat(s, CPU_PARTIAL_FREE);
+        return pobjects;
+}
+
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 {
         stat(s, CPUSLAB_FLUSH);
@@ -1941,8 +2060,12 @@ static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
 {
         struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
 
-        if (likely(c && c->page))
-                flush_slab(s, c);
+        if (likely(c)) {
+                if (c->page)
+                        flush_slab(s, c);
+
+                unfreeze_partials(s);
+        }
 }
 
 static void flush_cpu_slab(void *d)
@@ -2066,8 +2189,6 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
  * Slow path. The lockless freelist is empty or we need to perform
  * debugging duties.
  *
- * Interrupts are disabled.
- *
  * Processing is still very fast if new objects have been freed to the
  * regular freelist. In that case we simply take over the regular freelist
  * as the lockless freelist and zap the regular freelist.
@@ -2100,7 +2221,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 
         if (!c->page)
                 goto new_slab;
-
+redo:
         if (unlikely(!node_match(c, node))) {
                 stat(s, ALLOC_NODE_MISMATCH);
                 deactivate_slab(s, c);
@@ -2133,7 +2254,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
                         NULL, new.counters,
                         "__slab_alloc"));
 
-        if (unlikely(!object)) {
+        if (!object) {
                 c->page = NULL;
                 stat(s, DEACTIVATE_BYPASS);
                 goto new_slab;
@@ -2148,6 +2269,17 @@ load_freelist:
         return object;
 
 new_slab:
+
+        if (c->partial) {
+                c->page = c->partial;
+                c->partial = c->page->next;
+                c->node = page_to_nid(c->page);
+                stat(s, CPU_PARTIAL_ALLOC);
+                c->freelist = NULL;
+                goto redo;
+        }
+
+        /* Then do expensive stuff like retrieving pages from the partial lists */
         object = get_partial(s, gfpflags, node, c);
 
         if (unlikely(!object)) {
@@ -2341,16 +2473,29 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
                 was_frozen = new.frozen;
                 new.inuse--;
                 if ((!new.inuse || !prior) && !was_frozen && !n) {
-                        n = get_node(s, page_to_nid(page));
-                        /*
-                         * Speculatively acquire the list_lock.
-                         * If the cmpxchg does not succeed then we may
-                         * drop the list_lock without any processing.
-                         *
-                         * Otherwise the list_lock will synchronize with
-                         * other processors updating the list of slabs.
-                         */
-                        spin_lock_irqsave(&n->list_lock, flags);
+
+                        if (!kmem_cache_debug(s) && !prior)
+
+                                /*
+                                 * Slab was on no list before and will be partially empty
+                                 * We can defer the list move and instead freeze it.
+                                 */
+                                new.frozen = 1;
+
+                        else { /* Needs to be taken off a list */
+
+                                n = get_node(s, page_to_nid(page));
+                                /*
+                                 * Speculatively acquire the list_lock.
+                                 * If the cmpxchg does not succeed then we may
+                                 * drop the list_lock without any processing.
+                                 *
+                                 * Otherwise the list_lock will synchronize with
+                                 * other processors updating the list of slabs.
+                                 */
+                                spin_lock_irqsave(&n->list_lock, flags);
+
+                        }
                 }
                 inuse = new.inuse;
 
@@ -2360,7 +2505,15 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
                 "__slab_free"));
 
         if (likely(!n)) {
-                /*
+
+                /*
+                 * If we just froze the page then put it onto the
+                 * per cpu partial list.
+                 */
+                if (new.frozen && !was_frozen)
+                        put_cpu_partial(s, page, 1);
+
+                /*
                  * The list lock was not taken therefore no list
                  * activity can be necessary.
                  */
@@ -2429,7 +2582,6 @@ static __always_inline void slab_free(struct kmem_cache *s,
         slab_free_hook(s, x);
 
 redo:
-
         /*
          * Determine the currently cpus per cpu slab.
          * The cpu may change afterward. However that does not matter since
@@ -2919,7 +3071,34 @@ static int kmem_cache_open(struct kmem_cache *s,
          * The larger the object size is, the more pages we want on the partial
          * list to avoid pounding the page allocator excessively.
          */
-        set_min_partial(s, ilog2(s->size));
+        set_min_partial(s, ilog2(s->size) / 2);
+
+        /*
+         * cpu_partial determined the maximum number of objects kept in the
+         * per cpu partial lists of a processor.
+         *
+         * Per cpu partial lists mainly contain slabs that just have one
+         * object freed. If they are used for allocation then they can be
+         * filled up again with minimal effort. The slab will never hit the
+         * per node partial lists and therefore no locking will be required.
+         *
+         * This setting also determines
+         *
+         * A) The number of objects from per cpu partial slabs dumped to the
+         *    per node list when we reach the limit.
+         * B) The number of objects in partial partial slabs to extract from the
+         *    per node list when we run out of per cpu objects. We only fetch 50%
+         *    to keep some capacity around for frees.
+         */
+        if (s->size >= PAGE_SIZE)
+                s->cpu_partial = 2;
+        else if (s->size >= 1024)
+                s->cpu_partial = 6;
+        else if (s->size >= 256)
+                s->cpu_partial = 13;
+        else
+                s->cpu_partial = 30;
+
         s->refcount = 1;
 #ifdef CONFIG_NUMA
         s->remote_node_defrag_ratio = 1000;
@@ -4327,6 +4506,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 
                 for_each_possible_cpu(cpu) {
                         struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
+                        struct page *page;
 
                         if (!c || c->node < 0)
                                 continue;
@@ -4342,6 +4522,13 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
                                 total += x;
                                 nodes[c->node] += x;
                         }
+                        page = c->partial;
+
+                        if (page) {
+                                x = page->pobjects;
+                                total += x;
+                                nodes[c->node] += x;
+                        }
                         per_cpu[c->node]++;
                 }
         }
@@ -4493,6 +4680,27 @@ static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
 }
 SLAB_ATTR(min_partial);
 
+static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
+{
+        return sprintf(buf, "%u\n", s->cpu_partial);
+}
+
+static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
+                                 size_t length)
+{
+        unsigned long objects;
+        int err;
+
+        err = strict_strtoul(buf, 10, &objects);
+        if (err)
+                return err;
+
+        s->cpu_partial = objects;
+        flush_all(s);
+        return length;
+}
+SLAB_ATTR(cpu_partial);
+
 static ssize_t ctor_show(struct kmem_cache *s, char *buf)
 {
         if (!s->ctor)
@@ -4531,6 +4739,37 @@ static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
 }
 SLAB_ATTR_RO(objects_partial);
 
+static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
+{
+        int objects = 0;
+        int pages = 0;
+        int cpu;
+        int len;
+
+        for_each_online_cpu(cpu) {
+                struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;
+
+                if (page) {
+                        pages += page->pages;
+                        objects += page->pobjects;
+                }
+        }
+
+        len = sprintf(buf, "%d(%d)", objects, pages);
+
+#ifdef CONFIG_SMP
+        for_each_online_cpu(cpu) {
+                struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;
+
+                if (page && len < PAGE_SIZE - 20)
+                        len += sprintf(buf + len, " C%d=%d(%d)", cpu,
+                                page->pobjects, page->pages);
+        }
+#endif
+        return len + sprintf(buf + len, "\n");
+}
+SLAB_ATTR_RO(slabs_cpu_partial);
+
 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
 {
         return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
@@ -4853,6 +5092,8 @@ STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
 STAT_ATTR(ORDER_FALLBACK, order_fallback);
 STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
+STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
+STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
 #endif
 
 static struct attribute *slab_attrs[] = {
@@ -4861,6 +5102,7 @@ static struct attribute *slab_attrs[] = {
         &objs_per_slab_attr.attr,
         &order_attr.attr,
         &min_partial_attr.attr,
+        &cpu_partial_attr.attr,
         &objects_attr.attr,
         &objects_partial_attr.attr,
         &partial_attr.attr,
@@ -4873,6 +5115,7 @@ static struct attribute *slab_attrs[] = {
         &destroy_by_rcu_attr.attr,
         &shrink_attr.attr,
         &reserved_attr.attr,
+        &slabs_cpu_partial_attr.attr,
 #ifdef CONFIG_SLUB_DEBUG
         &total_objects_attr.attr,
         &slabs_attr.attr,
@@ -4914,6 +5157,8 @@ static struct attribute *slab_attrs[] = {
         &order_fallback_attr.attr,
         &cmpxchg_double_fail_attr.attr,
         &cmpxchg_double_cpu_fail_attr.attr,
+        &cpu_partial_alloc_attr.attr,
+        &cpu_partial_free_attr.attr,
 #endif
 #ifdef CONFIG_FAILSLAB
         &failslab_attr.attr,