author    Joonsoo Kim <iamjoonsoo.kim@lge.com>   2013-06-19 01:05:52 -0400
committer Pekka Enberg <penberg@kernel.org>      2013-07-07 12:09:56 -0400
commit    345c905d13a4ec9f774b6b4bc038fe4aef26cced
tree      9b6177f5b4712ff78c3808b240e66079e8c8338f
parent    e7efa615ccf78394338144ff0187be331240748a
slub: Make cpu partial slab support configurable
CPU partial support can introduce a level of indeterminism that is not
wanted in certain contexts (like a realtime kernel). Make it
configurable.
This patch is based on Christoph Lameter's "slub: Make cpu partial slab
support configurable V2".
Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
-rw-r--r--   init/Kconfig | 11
-rw-r--r--   mm/slub.c    | 27
2 files changed, 32 insertions, 6 deletions
diff --git a/init/Kconfig b/init/Kconfig
index 7d30240e5bfe..3b34a88cf34e 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1511,6 +1511,17 @@ config SLOB
 
 endchoice
 
+config SLUB_CPU_PARTIAL
+	default y
+	depends on SLUB
+	bool "SLUB per cpu partial cache"
+	help
+	  Per cpu partial caches accelerate objects allocation and freeing
+	  that is local to a processor at the price of more indeterminism
+	  in the latency of the free. On overflow these caches will be cleared
+	  which requires the taking of locks that may cause latency spikes.
+	  Typically one would choose no for a realtime system.
+
 config MMAP_ALLOW_UNINITIALIZED
 	bool "Allow mmapped anonymous memory to be uninitialized"
 	depends on EXPERT && !MMU
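The choice is made at build time. A hypothetical .config fragment for a latency-sensitive build would simply leave the new symbol unset (CONFIG_SLUB is its only dependency):

    CONFIG_SLUB=y
    # CONFIG_SLUB_CPU_PARTIAL is not set

A throughput-oriented kernel keeps the default y and retains the per-cpu partial lists.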
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -122,6 +122,15 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
 #endif
 }
 
+static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
+{
+#ifdef CONFIG_SLUB_CPU_PARTIAL
+	return !kmem_cache_debug(s);
+#else
+	return false;
+#endif
+}
+
 /*
  * Issues still to be resolved:
  *
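The new helper is the pivot of the patch: with CONFIG_SLUB_CPU_PARTIAL disabled it compiles to a constant false, so every branch it guards becomes dead code and the per-cpu partial machinery drops out of the build. A minimal stand-alone sketch of the same pattern, with simplified illustrative types rather than the kernel's structures:

/*
 * Illustrative userspace sketch of the compile-time gating pattern used by
 * the new helper; the types and names here are stand-ins, not the kernel's.
 * Build with -DCONFIG_SLUB_CPU_PARTIAL to enable the fast path.
 */
#include <stdbool.h>
#include <stdio.h>

struct cache { bool debug; };   /* stand-in for struct kmem_cache */

static inline bool cache_has_cpu_partial(const struct cache *c)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
	return !c->debug;       /* available unless the cache is being debugged */
#else
	(void)c;
	return false;           /* constant: guarded branches become dead code */
#endif
}

int main(void)
{
	struct cache c = { .debug = false };

	if (cache_has_cpu_partial(&c))
		puts("using per-cpu partial slabs");
	else
		puts("using only the per-node partial list");
	return 0;
}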
@@ -1572,7 +1581,8 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 			put_cpu_partial(s, page, 0);
 			stat(s, CPU_PARTIAL_NODE);
 		}
-		if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
+		if (!kmem_cache_has_cpu_partial(s)
+			|| available > s->cpu_partial / 2)
 			break;
 
 	}
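With the helper compiled in, get_partial_node() keeps acquiring partial slabs until more than half of s->cpu_partial objects are available (for example, with cpu_partial set to 8 it stops once more than 4 free objects have been gathered); with it compiled out, the loop now breaks after the first slab, exactly as it previously did for debug caches.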
@@ -1883,6 +1893,7 @@ redo:
 static void unfreeze_partials(struct kmem_cache *s,
 		struct kmem_cache_cpu *c)
 {
+#ifdef CONFIG_SLUB_CPU_PARTIAL
 	struct kmem_cache_node *n = NULL, *n2 = NULL;
 	struct page *page, *discard_page = NULL;
 
@@ -1937,6 +1948,7 @@ static void unfreeze_partials(struct kmem_cache *s,
 		discard_slab(s, page);
 		stat(s, FREE_SLAB);
 	}
+#endif
 }
 
 /*
@@ -1950,6 +1962,7 @@ static void unfreeze_partials(struct kmem_cache *s,
  */
 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 {
+#ifdef CONFIG_SLUB_CPU_PARTIAL
 	struct page *oldpage;
 	int pages;
 	int pobjects;
@@ -1989,6 +2002,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 		page->next = oldpage;
 
 	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
+#endif
 }
 
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
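put_cpu_partial() publishes the frozen page onto the per-cpu partial list with a cmpxchg retry loop rather than a lock, which is what the #ifdef now brackets. The rough userspace analogue below uses C11 atomics on a single shared list head; it is only an analogy, since this_cpu_cmpxchg() works on a per-cpu pointer and the kernel version also counts pages and objects and drains the list through unfreeze_partials() when it overflows.

/* Rough analogue of the cmpxchg publish loop; one atomic list head stands
 * in for the kernel's per-cpu ->partial pointer (illustrative only). */
#include <stdatomic.h>
#include <stddef.h>

struct fake_page {
	struct fake_page *next;
};

static _Atomic(struct fake_page *) partial_head;

static void push_partial(struct fake_page *page)
{
	struct fake_page *old = atomic_load(&partial_head);

	do {
		page->next = old;       /* link in front of the observed head */
	} while (!atomic_compare_exchange_weak(&partial_head, &old, page));
	/* on failure, 'old' is updated with the current head and we retry */
}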
@@ -2497,7 +2511,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		new.inuse--;
 		if ((!new.inuse || !prior) && !was_frozen) {
 
-			if (!kmem_cache_debug(s) && !prior)
+			if (kmem_cache_has_cpu_partial(s) && !prior)
 
 				/*
 				 * Slab was on no list before and will be partially empty
@@ -2552,8 +2566,9 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	 * Objects left in the slab. If it was not on the partial list before
 	 * then add it.
 	 */
-	if (kmem_cache_debug(s) && unlikely(!prior)) {
-		remove_full(s, page);
+	if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
+		if (kmem_cache_debug(s))
+			remove_full(s, page);
 		add_partial(n, page, DEACTIVATE_TO_TAIL);
 		stat(s, FREE_ADD_PARTIAL);
 	}
@@ -3061,7 +3076,7 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
 	 * per node list when we run out of per cpu objects. We only fetch 50%
 	 * to keep some capacity around for frees.
 	 */
-	if (kmem_cache_debug(s))
+	if (!kmem_cache_has_cpu_partial(s))
 		s->cpu_partial = 0;
 	else if (s->size >= PAGE_SIZE)
 		s->cpu_partial = 2;
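The effect here is that s->cpu_partial is pinned at 0 whenever the feature is unavailable, while the existing size-based heuristic (of which only the first branch, 2 for objects of PAGE_SIZE or more, appears in this hunk) is untouched for the common case.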
@@ -4456,7 +4471,7 @@ static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
 	err = strict_strtoul(buf, 10, &objects);
 	if (err)
 		return err;
-	if (objects && kmem_cache_debug(s))
+	if (objects && !kmem_cache_has_cpu_partial(s))
 		return -EINVAL;
 
 	s->cpu_partial = objects;
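At run time the cpu_partial sysfs attribute behaves accordingly: on a kernel with the option enabled, writing a value to /sys/kernel/slab/<cache>/cpu_partial (cache name per the usual slab sysfs layout) still tunes the per-cache limit, and writing 0 disables the per-cpu partial list for that cache. With CONFIG_SLUB_CPU_PARTIAL=n, or for a cache with debugging enabled, any non-zero write now returns -EINVAL, while writing 0 continues to succeed.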