author    Linus Torvalds <torvalds@linux-foundation.org>  2009-09-14 20:38:52 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-09-14 20:38:52 -0400
commit    bb193c986a7104f718c1b92709e1e6e22ac3f864 (patch)
tree      1d5a1b08e31a8e39ec4688afd24695c0c6a113ae /mm
parent    f65ac45e20b03081ed64f41ce91bb982f8ac258d (diff)
parent    aceda773606f2506a25b91aaafae87b2e4315834 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
  slub: fix slab_pad_check()
  slub: release kobject if sysfs_create_group failed in sysfs_slab_add
  SLUB: fix ARCH_KMALLOC_MINALIGN cases 64 and 256
  SLUB: Fix some coding style issues
  SLUB: Drop write permission to /proc/slabinfo
  slab: remove duplicate kmem_cache_init_late() declarations
  slub: change kmem_cache->align to record the real alignment
  slub: use size and objsize orders to disable debug flags
  slub: add option to disable higher order debugging slabs
Diffstat (limited to 'mm')
-rw-r--r--  mm/slob.c |  5
-rw-r--r--  mm/slub.c | 82
2 files changed, 73 insertions(+), 14 deletions(-)
diff --git a/mm/slob.c b/mm/slob.c
index 9641da3d5e58..837ebd64cc34 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -692,3 +692,8 @@ void __init kmem_cache_init(void)
 {
         slob_ready = 1;
 }
+
+void __init kmem_cache_init_late(void)
+{
+        /* Nothing to do */
+}
diff --git a/mm/slub.c b/mm/slub.c
index b6276753626e..417ed843b251 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -141,6 +141,13 @@
                 SLAB_POISON | SLAB_STORE_USER)
 
 /*
+ * Debugging flags that require metadata to be stored in the slab. These get
+ * disabled when slub_debug=O is used and a cache's min order increases with
+ * metadata.
+ */
+#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
+
+/*
  * Set of flags that will prevent slab merging
  */
 #define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
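Red zoning, poisoning, and user tracking all store extra bytes alongside each object, so enabling them grows s->size past s->objsize; for a large object that growth can cross a page boundary and force a higher allocation order. A standalone sketch of the arithmetic (illustrative numbers only, not kernel code):

    #include <stdio.h>

    #define PAGE_SIZE 4096

    /* Smallest page order whose span holds 'size' bytes. */
    static int order_for(unsigned long size)
    {
            unsigned long span = PAGE_SIZE;
            int order = 0;

            while (span < size) {
                    span <<= 1;
                    order++;
            }
            return order;
    }

    int main(void)
    {
            unsigned long objsize = 4096;   /* what the caller asked for */
            unsigned long metadata = 128;   /* red zone + track records; made-up figure */

            /* 4096 bytes fit an order-0 page; 4224 bytes need order 1. */
            printf("clean: order %d, debug: order %d\n",
                   order_for(objsize), order_for(objsize + metadata));
            return 0;
    }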
@@ -325,6 +332,7 @@ static int slub_debug;
 #endif
 
 static char *slub_debug_slabs;
+static int disable_higher_order_debug;
 
 /*
  * Object debugging
@@ -646,7 +654,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
         slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
         print_section("Padding", end - remainder, remainder);
 
-        restore_bytes(s, "slab padding", POISON_INUSE, start, end);
+        restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
         return 0;
 }
 
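The fix narrows the restore: as the hunk shows, restore_bytes() previously rewrote POISON_INUSE from start, the first byte of the slab, through end, clobbering live objects along with the padding. Only the remainder bytes at the tail were checked, so only they should be restored. A minimal model of the corrected behaviour (plain C, not the kernel helper):

    #include <string.h>

    #define POISON_INUSE 0x5a

    /* Re-poison only the trailing pad area [end - remainder, end). */
    static void restore_padding(unsigned char *start, size_t length,
                                size_t used)
    {
            size_t remainder = length - used;
            unsigned char *end = start + length;

            memset(end - remainder, POISON_INUSE, remainder);
    }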
@@ -976,6 +984,15 @@ static int __init setup_slub_debug(char *str)
                 */
                goto check_slabs;
 
+        if (tolower(*str) == 'o') {
+                /*
+                 * Avoid enabling debugging on caches if its minimum order
+                 * would increase as a result.
+                 */
+                disable_higher_order_debug = 1;
+                goto out;
+        }
+
         slub_debug = 0;
         if (*str == '-')
                 /*
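With this hook in place, 'O' joins the existing slub_debug switches on the kernel command line. Illustrative usage (the other letters are the long-standing flags: F sanity checks, Z red zoning, P poisoning, U user tracking):

    slub_debug=FZPU     full debugging on every cache
    slub_debug=O        keep the configured debugging, but drop the metadata
                        flags on any cache whose minimum slab order would
                        otherwise increase

Note that in this initial version the 'o' test short-circuits to out:, so it acts as a standalone option rather than one flag among several.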
@@ -1026,8 +1043,8 @@ static unsigned long kmem_cache_flags(unsigned long objsize,
         * Enable debugging if selected on the kernel commandline.
         */
        if (slub_debug && (!slub_debug_slabs ||
-               strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)) == 0))
+               !strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs))))
                flags |= slub_debug;
 
        return flags;
 }
@@ -1109,8 +1126,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
        }
 
        if (kmemcheck_enabled
-               && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS)))
-       {
+               && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
                int pages = 1 << oo_order(oo);
 
                kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
@@ -1560,6 +1576,10 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
                "default order: %d, min order: %d\n", s->name, s->objsize,
                s->size, oo_order(s->oo), oo_order(s->min));
 
+       if (oo_order(s->min) > get_order(s->objsize))
+               printk(KERN_WARNING "  %s debugging increased min order, use "
+                      "slub_debug=O to disable.\n", s->name);
+
        for_each_online_node(node) {
                struct kmem_cache_node *n = get_node(s, node);
                unsigned long nr_slabs;
@@ -2001,7 +2021,7 @@ static inline int calculate_order(int size)
                                return order;
                        fraction /= 2;
                }
-               min_objects --;
+               min_objects--;
        }
 
        /*
@@ -2400,6 +2420,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
         * on bootup.
         */
        align = calculate_alignment(flags, align, s->objsize);
+       s->align = align;
 
        /*
         * SLUB stores one object immediately after another beginning from
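Recording the computed value is the point of the 'record the real alignment' patch: s->align previously kept whatever alignment the caller had requested, even though calculate_alignment() may round it up, so the align attribute under /sys/kernel/slab/ could under-report. For reference, the logic whose result is now recorded looked roughly like this (a simplified sketch of this era's slub.c helper, not a verbatim copy):

    static unsigned long calculate_alignment(unsigned long flags,
                    unsigned long align, unsigned long size)
    {
            /*
             * Honour SLAB_HWCACHE_ALIGN by aligning to a cache line,
             * but only while the object is large enough to justify it.
             */
            if (flags & SLAB_HWCACHE_ALIGN) {
                    unsigned long ralign = cache_line_size();

                    while (size <= ralign / 2)
                            ralign /= 2;
                    align = max(align, ralign);
            }

            if (align < ARCH_SLAB_MINALIGN)
                    align = ARCH_SLAB_MINALIGN;

            return ALIGN(align, sizeof(void *));
    }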
@@ -2452,6 +2473,18 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
 
        if (!calculate_sizes(s, -1))
                goto error;
+       if (disable_higher_order_debug) {
+               /*
+                * Disable debugging flags that store metadata if the min slab
+                * order increased.
+                */
+               if (get_order(s->size) > get_order(s->objsize)) {
+                       s->flags &= ~DEBUG_METADATA_FLAGS;
+                       s->offset = 0;
+                       if (!calculate_sizes(s, -1))
+                               goto error;
+               }
+       }
 
        /*
         * The larger the object size is, the more pages we want on the partial
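The recovery test compares page orders rather than byte counts, so debugging is only dropped when the metadata actually costs an order. A worked example with 4 KiB pages: objsize = 4096 needs get_order(4096) = 0, but with roughly 128 bytes of red-zone and tracking metadata s->size = 4224 needs get_order(4224) = 1, so the cache sheds DEBUG_METADATA_FLAGS, resets s->offset (the free-pointer offset the debug layout had moved), and recomputes its layout. A 2048-byte object inflated to 2176 bytes stays at order 0 and keeps its debugging.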
@@ -2790,6 +2823,11 @@ static s8 size_index[24] = {
        2       /* 192 */
 };
 
+static inline int size_index_elem(size_t bytes)
+{
+       return (bytes - 1) / 8;
+}
+
 static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 {
        int index;
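The helper just names the open-coded (size - 1) / 8 binning used below and in kmem_cache_init(): kmalloc sizes are grouped in 8-byte steps, so 1..8 share slot 0, 9..16 slot 1, and so on up to 192 in slot 23. A quick self-check of the mapping (ordinary userspace C):

    #include <assert.h>
    #include <stddef.h>

    static int size_index_elem(size_t bytes)
    {
            return (bytes - 1) / 8;
    }

    int main(void)
    {
            assert(size_index_elem(1)   == 0);
            assert(size_index_elem(8)   == 0);
            assert(size_index_elem(9)   == 1);
            assert(size_index_elem(96)  == 11);
            assert(size_index_elem(192) == 23); /* last slot of size_index[24] */
            return 0;
    }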
@@ -2798,7 +2836,7 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
                if (!size)
                        return ZERO_SIZE_PTR;
 
-               index = size_index[(size - 1) / 8];
+               index = size_index[size_index_elem(size)];
        } else
                index = fls(size - 1);
 
@@ -3156,10 +3194,12 @@ void __init kmem_cache_init(void)
        slab_state = PARTIAL;
 
        /* Caches that are not of the two-to-the-power-of size */
-       if (KMALLOC_MIN_SIZE <= 64) {
+       if (KMALLOC_MIN_SIZE <= 32) {
                create_kmalloc_cache(&kmalloc_caches[1],
                                "kmalloc-96", 96, GFP_NOWAIT);
                caches++;
+       }
+       if (KMALLOC_MIN_SIZE <= 64) {
                create_kmalloc_cache(&kmalloc_caches[2],
                                "kmalloc-192", 192, GFP_NOWAIT);
                caches++;
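This split is the ARCH_KMALLOC_MINALIGN fix for the 64-byte case: with a 64-byte minimum alignment every allocation is rounded to a multiple of 64, so a 96-byte cache can never be handed out (96 rounds up to 128) and kmalloc-96 is now only created when KMALLOC_MIN_SIZE <= 32. kmalloc-192 survives one step further because 192 = 3 * 64 is already 64-byte aligned; the leftover 65..96 byte requests are redirected to the 128-byte cache by the size_index fix-up in the next hunk.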
@@ -3186,17 +3226,28 @@ void __init kmem_cache_init(void)
        BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
                (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
 
-       for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
-               size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;
+       for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
+               int elem = size_index_elem(i);
+               if (elem >= ARRAY_SIZE(size_index))
+                       break;
+               size_index[elem] = KMALLOC_SHIFT_LOW;
+       }
 
-       if (KMALLOC_MIN_SIZE == 128) {
+       if (KMALLOC_MIN_SIZE == 64) {
+               /*
+                * The 96 byte size cache is not used if the alignment
+                * is 64 byte.
+                */
+               for (i = 64 + 8; i <= 96; i += 8)
+                       size_index[size_index_elem(i)] = 7;
+       } else if (KMALLOC_MIN_SIZE == 128) {
                /*
                 * The 192 byte sized cache is not used if the alignment
                 * is 128 byte. Redirect kmalloc to use the 256 byte cache
                 * instead.
                 */
                for (i = 128 + 8; i <= 192; i += 8)
-                       size_index[(i - 1) / 8] = 8;
+                       size_index[size_index_elem(i)] = 8;
        }
 
        slab_state = UP;
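Putting the pieces together, a userspace sketch of the table fix-up for the new KMALLOC_MIN_SIZE == 64 branch (the initial values mirror slub.c's size_index: cache indices 1 and 2 are kmalloc-96 and kmalloc-192, 3..7 the power-of-two caches):

    #include <assert.h>
    #include <stddef.h>

    static int size_index_elem(size_t bytes)
    {
            return (bytes - 1) / 8;
    }

    int main(void)
    {
            signed char size_index[24] = {
                    3, 4, 5, 5, 6, 6, 6, 6, /*   1.. 64: 8..64 byte caches */
                    1, 1, 1, 1,             /*  65.. 96: kmalloc-96        */
                    7, 7, 7, 7,             /*  97..128: kmalloc-128       */
                    2, 2, 2, 2, 2, 2, 2, 2  /* 129..192: kmalloc-192       */
            };
            size_t i;

            /* KMALLOC_MIN_SIZE == 64: kmalloc-96 is unusable, so send
             * 65..96 byte requests to index 7, the 128-byte cache. */
            for (i = 64 + 8; i <= 96; i += 8)
                    size_index[size_index_elem(i)] = 7;

            assert(size_index[size_index_elem(72)] == 7);
            assert(size_index[size_index_elem(96)] == 7);
            return 0;
    }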
@@ -4543,8 +4594,11 @@ static int sysfs_slab_add(struct kmem_cache *s)
        }
 
        err = sysfs_create_group(&s->kobj, &slab_attr_group);
-       if (err)
+       if (err) {
+               kobject_del(&s->kobj);
+               kobject_put(&s->kobj);
                return err;
+       }
        kobject_uevent(&s->kobj, KOBJ_ADD);
        if (!unmergeable) {
                /* Setup first alias */
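The two added calls are the standard kobject error unwind: kobject_del() tears down the sysfs entry that registration created, and kobject_put() drops the reference so the ktype's release() can run and free the object; returning without them leaked both. The same shape in a generic registration helper (hypothetical my_obj type, not the slab code):

    #include <linux/kobject.h>
    #include <linux/slab.h>

    struct my_obj {
            struct kobject kobj;
    };

    /* Runs once the last reference is dropped by kobject_put(). */
    static void my_obj_release(struct kobject *kobj)
    {
            kfree(container_of(kobj, struct my_obj, kobj));
    }

    static struct kobj_type my_ktype = {
            .release = my_obj_release,
    };

    static int my_obj_register(struct my_obj *o, struct kobject *parent,
                               const struct attribute_group *grp)
    {
            int err;

            err = kobject_init_and_add(&o->kobj, &my_ktype, parent, "my_obj");
            if (err)
                    goto put;

            err = sysfs_create_group(&o->kobj, grp);
            if (err)
                    goto del;       /* undo the add, then drop the ref */

            kobject_uevent(&o->kobj, KOBJ_ADD);
            return 0;
    del:
            kobject_del(&o->kobj);
    put:
            kobject_put(&o->kobj);
            return err;
    }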
@@ -4726,7 +4780,7 @@ static const struct file_operations proc_slabinfo_operations = {
 
 static int __init slab_proc_init(void)
 {
-       proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
+       proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
        return 0;
 }
 module_init(slab_proc_init);
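For reference, S_IWUSR|S_IRUGO is mode 0644 while S_IRUGO alone is 0444. SLUB's /proc/slabinfo has no write handler (cache tuning happens under /sys/kernel/slab/ instead), so the owner-write bit only advertised an operation that would fail; the new mode matches what the file actually supports.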