 Documentation/vm/slub.txt | 10 ++++++++++
 include/linux/slob_def.h  |  5 -----
 include/linux/slub_def.h  |  8 ++------
 mm/slob.c                 |  5 +++++
 mm/slub.c                 | 82 ++++++++++++++++++++++++++++++++++++---------
 5 files changed, 85 insertions(+), 25 deletions(-)
diff --git a/Documentation/vm/slub.txt b/Documentation/vm/slub.txt
index bb1f5c6e28b..510917ff59e 100644
--- a/Documentation/vm/slub.txt
+++ b/Documentation/vm/slub.txt
@@ -41,6 +41,8 @@ Possible debug options are
 	P		Poisoning (object and padding)
 	U		User tracking (free and alloc)
 	T		Trace (please only use on single slabs)
+	O		Switch debugging off for caches that would have
+			caused higher minimum slab orders
 	-		Switch all debugging off (useful if the kernel is
 			configured with CONFIG_SLUB_DEBUG_ON)
 
@@ -59,6 +61,14 @@ to the dentry cache with
 
 slub_debug=F,dentry
 
+Debugging options may require the minimum possible slab order to increase as
+a result of storing the metadata (for example, caches with PAGE_SIZE object
+sizes). This has a higher likelihood of resulting in slab allocation errors
+in low memory situations or if there's high fragmentation of memory. To
+switch off debugging for such caches by default, use
+
+	slub_debug=O
+
 In case you forgot to enable debugging on the kernel command line: It is
 possible to enable debugging manually when the kernel is up. Look at the
 contents of:
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
index bb5368df4be..0ec00b39d00 100644
--- a/include/linux/slob_def.h
+++ b/include/linux/slob_def.h
@@ -34,9 +34,4 @@ static __always_inline void *__kmalloc(size_t size, gfp_t flags)
 	return kmalloc(size, flags);
 }
 
-static inline void kmem_cache_init_late(void)
-{
-	/* Nothing to do */
-}
-
 #endif /* __LINUX_SLOB_DEF_H */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index c1c862b1d01..5ad70a60fd7 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -153,12 +153,10 @@ static __always_inline int kmalloc_index(size_t size)
 	if (size <= KMALLOC_MIN_SIZE)
 		return KMALLOC_SHIFT_LOW;
 
-#if KMALLOC_MIN_SIZE <= 64
-	if (size > 64 && size <= 96)
+	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
 		return 1;
-	if (size > 128 && size <= 192)
+	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
 		return 2;
-#endif
 	if (size <= 8) return 3;
 	if (size <= 16) return 4;
 	if (size <= 32) return 5;
@@ -304,6 +302,4 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 }
 #endif
 
-void __init kmem_cache_init_late(void);
-
 #endif /* _LINUX_SLUB_DEF_H */
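
The kmalloc_index() change in the first hunk replaces the preprocessor guard
with ordinary compile-time-constant conditions, so the 96-byte case is only
taken when KMALLOC_MIN_SIZE <= 32 and the 192-byte case when
KMALLOC_MIN_SIZE <= 64. A minimal userspace model of the resulting mapping
(kmalloc_index_model and the chosen KMALLOC_MIN_SIZE are assumptions of this
sketch, not kernel code):

#include <stdio.h>

#define KMALLOC_MIN_SIZE	8	/* assumption for this sketch */
#define KMALLOC_SHIFT_LOW	3

/* Userspace model of the size-to-cache-index mapping after this patch. */
static int kmalloc_index_model(size_t size)
{
	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;
	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;			/* kmalloc-96 */
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;			/* kmalloc-192 */
	if (size <= 8) return 3;
	if (size <= 16) return 4;
	if (size <= 32) return 5;
	if (size <= 64) return 6;
	if (size <= 128) return 7;
	if (size <= 256) return 8;
	return -1;				/* larger sizes elided */
}

int main(void)
{
	/* 96 and 192 hit the odd-sized caches; 200 falls through to 256. */
	printf("%d %d %d\n", kmalloc_index_model(96),
	       kmalloc_index_model(192), kmalloc_index_model(200));
	return 0;
}
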
diff --git a/mm/slob.c b/mm/slob.c
index 9641da3d5e5..837ebd64cc3 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -692,3 +692,8 @@ void __init kmem_cache_init(void)
 {
 	slob_ready = 1;
 }
+
+void __init kmem_cache_init_late(void)
+{
+	/* Nothing to do */
+}
diff --git a/mm/slub.c b/mm/slub.c
index b6276753626..417ed843b25 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -141,6 +141,13 @@
 			SLAB_POISON | SLAB_STORE_USER)
 
 /*
+ * Debugging flags that require metadata to be stored in the slab. These get
+ * disabled when slub_debug=O is used and a cache's min order increases with
+ * metadata.
+ */
+#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
+
+/*
  * Set of flags that will prevent slab merging
  */
 #define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
@@ -325,6 +332,7 @@ static int slub_debug;
 #endif
 
 static char *slub_debug_slabs;
+static int disable_higher_order_debug;
 
 /*
  * Object debugging
@@ -646,7 +654,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
 	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
 	print_section("Padding", end - remainder, remainder);
 
-	restore_bytes(s, "slab padding", POISON_INUSE, start, end);
+	restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
 	return 0;
 }
 
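
In slab_pad_check(), start points at the beginning of the slab while only the
trailing remainder bytes are padding, so the old call repoisoned the entire
slab, including object space; the fix restores POISON_INUSE over just
[end - remainder, end). A userspace sketch of the corrected range arithmetic
(buffer and sizes are made up; POISON_INUSE is the kernel's 0x5a fill byte):

#include <assert.h>
#include <string.h>

#define POISON_INUSE	0x5a	/* fill byte the kernel restores */

int main(void)
{
	unsigned char slab[64];
	size_t remainder = 16;		/* trailing padding bytes */

	memset(slab, 0xaa, sizeof(slab));	/* pretend object data */

	/* After the fix: restore only [end - remainder, end). */
	memset(slab + sizeof(slab) - remainder, POISON_INUSE, remainder);

	assert(slab[0] == 0xaa);		/* objects left intact */
	assert(slab[sizeof(slab) - 1] == POISON_INUSE);
	return 0;
}
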
@@ -976,6 +984,15 @@ static int __init setup_slub_debug(char *str)
 		 */
 		goto check_slabs;
 
+	if (tolower(*str) == 'o') {
+		/*
+		 * Avoid enabling debugging on caches if its minimum order
+		 * would increase as a result.
+		 */
+		disable_higher_order_debug = 1;
+		goto out;
+	}
+
 	slub_debug = 0;
 	if (*str == '-')
 		/*
@@ -1026,8 +1043,8 @@ static unsigned long kmem_cache_flags(unsigned long objsize,
 	 * Enable debugging if selected on the kernel commandline.
 	 */
 	if (slub_debug && (!slub_debug_slabs ||
-		strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)) == 0))
+		!strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs))))
 		flags |= slub_debug;
 
 	return flags;
 }
@@ -1109,8 +1126,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	}
 
 	if (kmemcheck_enabled
-		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS)))
-	{
+		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
 		int pages = 1 << oo_order(oo);
 
 		kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
@@ -1560,6 +1576,10 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
1560 "default order: %d, min order: %d\n", s->name, s->objsize, 1576 "default order: %d, min order: %d\n", s->name, s->objsize,
1561 s->size, oo_order(s->oo), oo_order(s->min)); 1577 s->size, oo_order(s->oo), oo_order(s->min));
1562 1578
1579 if (oo_order(s->min) > get_order(s->objsize))
1580 printk(KERN_WARNING " %s debugging increased min order, use "
1581 "slub_debug=O to disable.\n", s->name);
1582
1563 for_each_online_node(node) { 1583 for_each_online_node(node) {
1564 struct kmem_cache_node *n = get_node(s, node); 1584 struct kmem_cache_node *n = get_node(s, node);
1565 unsigned long nr_slabs; 1585 unsigned long nr_slabs;
@@ -2001,7 +2021,7 @@ static inline int calculate_order(int size)
 				return order;
 			fraction /= 2;
 		}
-		min_objects --;
+		min_objects--;
 	}
 
 	/*
@@ -2400,6 +2420,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	 * on bootup.
 	 */
 	align = calculate_alignment(flags, align, s->objsize);
+	s->align = align;
 
 	/*
 	 * SLUB stores one object immediately after another beginning from
@@ -2452,6 +2473,18 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
 
 	if (!calculate_sizes(s, -1))
 		goto error;
+	if (disable_higher_order_debug) {
+		/*
+		 * Disable debugging flags that store metadata if the min slab
+		 * order increased.
+		 */
+		if (get_order(s->size) > get_order(s->objsize)) {
+			s->flags &= ~DEBUG_METADATA_FLAGS;
+			s->offset = 0;
+			if (!calculate_sizes(s, -1))
+				goto error;
+		}
+	}
 
 	/*
 	 * The larger the object size is, the more pages we want on the partial
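
The guard works because metadata only matters when it pushes an allocation
across a page-order boundary: get_order(s->size) is the order one padded
object needs, get_order(s->objsize) the order the bare object needs. A
userspace sketch of the comparison (PAGE_SHIFT, the sizes, and order_for()
are assumptions standing in for the kernel's get_order()):

#include <stdio.h>

#define PAGE_SHIFT	12	/* assume 4K pages for this sketch */

/* Stand-in for the kernel's get_order(): bytes -> page order. */
static int order_for(size_t size)
{
	size_t span = (size_t)1 << PAGE_SHIFT;
	int order = 0;

	while (span < size) {
		span <<= 1;
		order++;
	}
	return order;
}

int main(void)
{
	size_t objsize = 4096;		/* a PAGE_SIZE-sized object */
	size_t padded = 4096 + 128;	/* object plus debug metadata */

	if (order_for(padded) > order_for(objsize))
		printf("metadata raised min order (%d > %d), debug dropped\n",
		       order_for(padded), order_for(objsize));
	return 0;
}
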
@@ -2790,6 +2823,11 @@ static s8 size_index[24] = {
 	2	/* 192 */
 };
 
+static inline int size_index_elem(size_t bytes)
+{
+	return (bytes - 1) / 8;
+}
+
 static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 {
 	int index;
@@ -2798,7 +2836,7 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 		if (!size)
 			return ZERO_SIZE_PTR;
 
-		index = size_index[(size - 1) / 8];
+		index = size_index[size_index_elem(size)];
 	} else
 		index = fls(size - 1);
 
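
size_index_elem() just names the existing (bytes - 1) / 8 math: each 8-byte
bucket of request sizes maps to one slot of size_index[], whose value is a
kmalloc cache index. A quick userspace model of the lookup (trimmed,
hypothetical table):

#include <stdio.h>

/* Slot i of a size_index-style table covers sizes (8*i, 8*(i+1)];
 * the stored value is a kmalloc cache index. */
static const signed char model_size_index[12] = {
	3, 4, 5, 5, 6, 6, 6, 6,	/* up to 64 bytes */
	1, 1, 1, 1,		/* 65..96 -> kmalloc-96 */
};

static int size_index_elem(size_t bytes)
{
	return (bytes - 1) / 8;
}

int main(void)
{
	printf("%d %d %d\n", size_index_elem(8),	/* 0 */
	       size_index_elem(9),			/* 1 */
	       model_size_index[size_index_elem(96)]);	/* 1 */
	return 0;
}
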
@@ -3156,10 +3194,12 @@ void __init kmem_cache_init(void)
 	slab_state = PARTIAL;
 
 	/* Caches that are not of the two-to-the-power-of size */
-	if (KMALLOC_MIN_SIZE <= 64) {
+	if (KMALLOC_MIN_SIZE <= 32) {
 		create_kmalloc_cache(&kmalloc_caches[1],
 			"kmalloc-96", 96, GFP_NOWAIT);
 		caches++;
+	}
+	if (KMALLOC_MIN_SIZE <= 64) {
 		create_kmalloc_cache(&kmalloc_caches[2],
 			"kmalloc-192", 192, GFP_NOWAIT);
 		caches++;
@@ -3186,17 +3226,28 @@ void __init kmem_cache_init(void)
 	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
 		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
 
-	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
-		size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;
+	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
+		int elem = size_index_elem(i);
+		if (elem >= ARRAY_SIZE(size_index))
+			break;
+		size_index[elem] = KMALLOC_SHIFT_LOW;
+	}
 
-	if (KMALLOC_MIN_SIZE == 128) {
+	if (KMALLOC_MIN_SIZE == 64) {
+		/*
+		 * The 96 byte size cache is not used if the alignment
+		 * is 64 byte.
+		 */
+		for (i = 64 + 8; i <= 96; i += 8)
+			size_index[size_index_elem(i)] = 7;
+	} else if (KMALLOC_MIN_SIZE == 128) {
 		/*
 		 * The 192 byte sized cache is not used if the alignment
 		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
 		 * instead.
 		 */
 		for (i = 128 + 8; i <= 192; i += 8)
-			size_index[(i - 1) / 8] = 8;
+			size_index[size_index_elem(i)] = 8;
 	}
 
 	slab_state = UP;
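
The new KMALLOC_MIN_SIZE == 64 branch mirrors the 128-byte case: with 64-byte
alignment a 96-byte cache would be rounded up to 128 bytes anyway, so the
72..96 slots are redirected to index 7, the 128-byte cache. A userspace
sketch of that table fixup (model_size_index and the hard-coded minimum size
are assumptions):

#include <stdio.h>

static signed char model_size_index[24] = {
	3, 4, 5, 5, 6, 6, 6, 6, 1, 1, 1, 1,
	7, 7, 7, 7, 2, 2, 2, 2, 2, 2, 2, 2,
};

static int size_index_elem(size_t bytes)
{
	return (bytes - 1) / 8;
}

int main(void)
{
	int kmalloc_min_size = 64;	/* assumption for this sketch */
	size_t i;

	if (kmalloc_min_size == 64)
		/* 96-byte cache is unusable at 64-byte alignment:
		 * point sizes 72..96 at the 128-byte cache (index 7). */
		for (i = 64 + 8; i <= 96; i += 8)
			model_size_index[size_index_elem(i)] = 7;

	printf("96 bytes -> cache index %d\n",
	       model_size_index[size_index_elem(96)]);	/* 7 */
	return 0;
}
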
@@ -4543,8 +4594,11 @@ static int sysfs_slab_add(struct kmem_cache *s)
 	}
 
 	err = sysfs_create_group(&s->kobj, &slab_attr_group);
-	if (err)
+	if (err) {
+		kobject_del(&s->kobj);
+		kobject_put(&s->kobj);
 		return err;
+	}
 	kobject_uevent(&s->kobj, KOBJ_ADD);
 	if (!unmergeable) {
 		/* Setup first alias */
@@ -4726,7 +4780,7 @@ static const struct file_operations proc_slabinfo_operations = {
 
 static int __init slab_proc_init(void)
 {
-	proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
+	proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
 	return 0;
 }
 module_init(slab_proc_init);