author     David Rientjes <rientjes@google.com>    2009-07-07 03:14:14 -0400
committer  Pekka Enberg <penberg@cs.helsinki.fi>   2009-07-10 02:52:55 -0400
commit     fa5ec8a1f66f3c2a3af723abcf8085509c9ee682
tree       be8e06f8ace38ed40ec2af4465dd1fffbc4b4f09 /mm
parent     c2cc49a2f8a479dde96a599646d30b6cc9dbed78
slub: add option to disable higher order debugging slabs
When debugging is enabled, slub requires that additional metadata be
stored in slabs for certain options: SLAB_RED_ZONE, SLAB_POISON, and
SLAB_STORE_USER.
Consequently, it may require that the minimum possible slab order needed
to allocate a single object be greater when using these options. The
most notable example is for objects that are PAGE_SIZE bytes in size.
Higher minimum slab orders may cause page allocation failures when the system
is out of memory or under heavy fragmentation.
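To make the order increase concrete, here is a minimal user-space sketch of the
arithmetic (not kernel code): DEBUG_SIZE below is an assumed overhead standing
in for the patch's red-zone/alignment/tracking metadata, and order_for() is a
stand-in for the kernel's get_order().

  #include <stdio.h>

  #define PAGE_SIZE  4096UL  /* 4K pages assumed */
  #define DEBUG_SIZE 72UL    /* assumed per-object debug overhead */

  /* user-space stand-in for the kernel's get_order() */
  static int order_for(unsigned long size)
  {
          int order = 0;

          while ((PAGE_SIZE << order) < size)
                  order++;
          return order;
  }

  int main(void)
  {
          unsigned long objsize = PAGE_SIZE;  /* e.g. one kmalloc-4096 object */

          printf("without debug metadata: order-%d slab\n", order_for(objsize));
          printf("with debug metadata:    order-%d slab\n",
                 order_for(objsize + DEBUG_SIZE));
          return 0;
  }

With 4K pages the bare object fits in an order-0 page, but any added metadata
pushes the minimum slab to order-1, which is exactly the allocation that can
fail under memory pressure.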
This patch adds a new slub_debug option, which disables debugging by
default for caches that would have resulted in higher minimum orders:
slub_debug=O
When this option is used on systems with 4K pages, kmalloc-4096, for
example, will not have debugging enabled by default even if
CONFIG_SLUB_DEBUG_ON is defined because it would have resulted in an order-1
minimum slab order.
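A standalone sketch of that decision for kmalloc-4096, again in user-space C:
the SLAB_* bit values and DEBUG_SIZE here are placeholders rather than the
kernel's real constants, and get_order() is reimplemented locally; only the
masking logic mirrors the kmem_cache_flags() change in the diff below.

  #include <stdio.h>

  #define PAGE_SIZE        4096UL  /* 4K pages assumed */
  #define SLAB_RED_ZONE    0x1UL   /* placeholder flag bits */
  #define SLAB_POISON      0x2UL
  #define SLAB_STORE_USER  0x4UL
  #define DEBUG_SIZE_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
  #define DEBUG_SIZE       72UL    /* assumed per-object debug overhead */

  static int get_order(unsigned long size)  /* user-space stand-in */
  {
          int order = 0;

          while ((PAGE_SIZE << order) < size)
                  order++;
          return order;
  }

  int main(void)
  {
          unsigned long objsize = 4096;            /* kmalloc-4096 */
          unsigned long flags = DEBUG_SIZE_FLAGS;  /* as if CONFIG_SLUB_DEBUG_ON */
          int disable_higher_order_debug = 1;      /* slub_debug=O was passed */

          /* drop size-increasing flags only if they would raise the min order */
          if (disable_higher_order_debug &&
              get_order(objsize + DEBUG_SIZE) > get_order(objsize))
                  flags &= ~DEBUG_SIZE_FLAGS;

          printf("kmalloc-4096 debug flags: %#lx\n", flags);  /* prints 0 */
          return 0;
  }

Caches whose minimum order is unaffected by the metadata keep their debugging;
only the size-increasing flags on boundary caches such as kmalloc-4096 are
cleared.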
Reported-by: Larry Finger <Larry.Finger@lwfinger.net>
Tested-by: Larry Finger <Larry.Finger@lwfinger.net>
Cc: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: David Rientjes <rientjes@google.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Diffstat (limited to 'mm')
-rw-r--r--   mm/slub.c   41
1 file changed, 38 insertions(+), 3 deletions(-)
@@ -142,6 +142,13 @@
 			SLAB_POISON | SLAB_STORE_USER)
 
 /*
+ * Debugging flags that require metadata to be stored in the slab, up to
+ * DEBUG_SIZE in size.
+ */
+#define DEBUG_SIZE_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
+#define DEBUG_SIZE (3 * sizeof(void *) + 2 * sizeof(struct track))
+
+/*
  * Set of flags that will prevent slab merging
  */
 #define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
@@ -326,6 +333,7 @@ static int slub_debug;
 #endif
 
 static char *slub_debug_slabs;
+static int disable_higher_order_debug;
 
 /*
  * Object debugging
@@ -977,6 +985,15 @@ static int __init setup_slub_debug(char *str)
 		 */
 		goto check_slabs;
 
+	if (tolower(*str) == 'o') {
+		/*
+		 * Avoid enabling debugging on caches if its minimum order
+		 * would increase as a result.
+		 */
+		disable_higher_order_debug = 1;
+		goto out;
+	}
+
 	slub_debug = 0;
 	if (*str == '-')
 		/*
@@ -1023,13 +1040,27 @@ static unsigned long kmem_cache_flags(unsigned long objsize,
 	unsigned long flags, const char *name,
 	void (*ctor)(void *))
 {
+	int debug_flags = slub_debug;
+
 	/*
 	 * Enable debugging if selected on the kernel commandline.
 	 */
-	if (slub_debug && (!slub_debug_slabs ||
-	    strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)) == 0))
-		flags |= slub_debug;
+	if (debug_flags) {
+		if (slub_debug_slabs &&
+		    strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)))
+			goto out;
+
+		/*
+		 * Disable debugging that increases slab size if the minimum
+		 * slab order would have increased as a result.
+		 */
+		if (disable_higher_order_debug &&
+		    get_order(objsize + DEBUG_SIZE) > get_order(objsize))
+			debug_flags &= ~DEBUG_SIZE_FLAGS;
 
+		flags |= debug_flags;
+	}
+out:
 	return flags;
 }
 #else
@@ -1561,6 +1592,10 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
 		"default order: %d, min order: %d\n", s->name, s->objsize,
 		s->size, oo_order(s->oo), oo_order(s->min));
 
+	if (oo_order(s->min) > get_order(s->objsize))
+		printk(KERN_WARNING "  %s debugging increased min order, use "
+			"slub_debug=O to disable.\n", s->name);
+
 	for_each_online_node(node) {
 		struct kmem_cache_node *n = get_node(s, node);
 		unsigned long nr_slabs;