Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c |  91
1 file changed, 75 insertions(+), 16 deletions(-)
@@ -141,6 +141,13 @@
                 SLAB_POISON | SLAB_STORE_USER)
 
 /*
+ * Debugging flags that require metadata to be stored in the slab. These get
+ * disabled when slub_debug=O is used and a cache's min order increases with
+ * metadata.
+ */
+#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
+
+/*
  * Set of flags that will prevent slab merging
  */
 #define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
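A note on the new macro: these three are exactly the flags whose bookkeeping lives inside the slab itself. Red zones pad each object, poisoning forces the free pointer outside the object, and SLAB_STORE_USER appends per-object alloc/free tracking records. The cost bites at page-order boundaries; with assumed numbers for illustration, a 4096-byte object exactly fills an order-0 page, so any per-object metadata pushes the cache's minimum order from 0 to 1 and doubles every slab.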
@@ -325,6 +332,7 @@ static int slub_debug;
 #endif
 
 static char *slub_debug_slabs;
+static int disable_higher_order_debug;
 
 /*
  * Object debugging
@@ -646,7 +654,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
         slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
         print_section("Padding", end - remainder, remainder);
 
-        restore_bytes(s, "slab padding", POISON_INUSE, start, end);
+        restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
         return 0;
 }
 
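The one-line fix above narrows an over-broad restore: restore_bytes() here re-poisons memory after a corruption report, and the old arguments covered [start, end), the whole slab, rewriting live objects along the way. Only the trailing padding was checked, so only [end - remainder, end) should be restored. With assumed numbers, 1000-byte slots in a 4096-byte slab leave remainder = 4096 - 4 * 1000 = 96, and the fixed call rewrites just those final 96 bytes.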
@@ -976,6 +984,15 @@ static int __init setup_slub_debug(char *str)
          */
         goto check_slabs;
 
+        if (tolower(*str) == 'o') {
+                /*
+                 * Avoid enabling debugging on caches whose minimum order
+                 * would increase as a result.
+                 */
+                disable_higher_order_debug = 1;
+                goto out;
+        }
+
         slub_debug = 0;
         if (*str == '-')
                 /*
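Usage implied by the new branch, since slub_debug is preset to DEBUG_DEFAULT_FLAGS earlier in this function: booting with

        slub_debug=O

keeps full default debugging but allows SLUB to drop the metadata-storing flags from any cache whose minimum order would otherwise grow; caches that fit their order anyway keep every check.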
@@ -1026,8 +1043,8 @@ static unsigned long kmem_cache_flags(unsigned long objsize,
          * Enable debugging if selected on the kernel commandline.
          */
         if (slub_debug && (!slub_debug_slabs ||
-                strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)) == 0))
+                !strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs))))
                 flags |= slub_debug;
 
         return flags;
 }
@@ -1054,6 +1071,8 @@ static inline unsigned long kmem_cache_flags(unsigned long objsize,
 }
 #define slub_debug 0
 
+#define disable_higher_order_debug 0
+
 static inline unsigned long slabs_node(struct kmem_cache *s, int node)
                                                         { return 0; }
 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
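This stub follows the usual pattern for the !CONFIG_SLUB_DEBUG build: defining the flag away as the constant 0 lets the new if (disable_higher_order_debug) test in kmem_cache_open() compile unchanged and vanish as dead code.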
@@ -1109,8 +1128,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
         }
 
         if (kmemcheck_enabled
-                && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS)))
-        {
+                && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
                 int pages = 1 << oo_order(oo);
 
                 kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
@@ -1560,6 +1578,10 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
                 "default order: %d, min order: %d\n", s->name, s->objsize,
                 s->size, oo_order(s->oo), oo_order(s->min));
 
+        if (oo_order(s->min) > get_order(s->objsize))
+                printk(KERN_WARNING " %s debugging increased min order, use "
+                        "slub_debug=O to disable.\n", s->name);
+
         for_each_online_node(node) {
                 struct kmem_cache_node *n = get_node(s, node);
                 unsigned long nr_slabs;
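The new warning fires only when debug metadata, not the object itself, is what raised the minimum order. A small userspace sketch of that comparison; PAGE_SIZE, the 128-byte metadata overhead, and the local get_order() model are assumptions for illustration, not kernel code:

        #include <stdio.h>

        #define PAGE_SIZE 4096UL

        /* Smallest order such that 2^order pages hold "size" bytes. */
        static int get_order(unsigned long size)
        {
                unsigned long pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
                int order = 0;

                while ((1UL << order) < pages)
                        order++;
                return order;
        }

        int main(void)
        {
                unsigned long objsize = 4096;        /* what the caller asked for */
                unsigned long size = objsize + 128;  /* plus assumed debug metadata */

                if (get_order(size) > get_order(objsize))
                        printf("debugging raised min order: %d -> %d\n",
                               get_order(objsize), get_order(size));
                return 0;
        }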
@@ -2001,7 +2023,7 @@ static inline int calculate_order(int size)
                                 return order;
                         fraction /= 2;
                 }
-                min_objects --;
+                min_objects--;
         }
 
         /*
@@ -2091,8 +2113,8 @@ init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
  */
 #define NR_KMEM_CACHE_CPU 100
 
-static DEFINE_PER_CPU(struct kmem_cache_cpu,
-                                kmem_cache_cpu)[NR_KMEM_CACHE_CPU];
+static DEFINE_PER_CPU(struct kmem_cache_cpu [NR_KMEM_CACHE_CPU],
+                      kmem_cache_cpu);
 
 static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free);
 static DECLARE_BITMAP(kmem_cach_cpu_free_init_once, CONFIG_NR_CPUS);
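A note on why the array bound moved: DEFINE_PER_CPU(type, name) wants the complete type, bound included, as its first argument; appending [NR_KMEM_CACHE_CPU] after the closing parenthesis only happens to work while the macro expands to a plain variable definition. A generic sketch of the supported form, with hypothetical names:

        /* A per-cpu array: the bound belongs inside the type argument. */
        static DEFINE_PER_CPU(int [4], demo_counters);

        static void bump_counter(int cpu, int idx)
        {
                per_cpu(demo_counters, cpu)[idx]++;  /* element idx of that cpu's copy */
        }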
@@ -2400,6 +2422,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
          * on bootup.
          */
         align = calculate_alignment(flags, align, s->objsize);
+        s->align = align;
 
         /*
          * SLUB stores one object immediately after another beginning from
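A small but real fix rides along here: calculate_alignment() may raise the alignment above what the cache's creator passed in, and until now the raised value fed the size math but was never stored, so s->align, which is what /sys/kernel/slab/<cache>/align reports, understated the effective alignment.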
@@ -2452,6 +2475,18 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
 
         if (!calculate_sizes(s, -1))
                 goto error;
+        if (disable_higher_order_debug) {
+                /*
+                 * Disable debugging flags that store metadata if the min slab
+                 * order increased.
+                 */
+                if (get_order(s->size) > get_order(s->objsize)) {
+                        s->flags &= ~DEBUG_METADATA_FLAGS;
+                        s->offset = 0;
+                        if (!calculate_sizes(s, -1))
+                                goto error;
+                }
+        }
 
         /*
          * The larger the object size is, the more pages we want on the partial
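The recovery path runs calculate_sizes() twice: first with the caller's flags, then, if the padded size crosses a page-order boundary that the bare object would not, again with the metadata flags stripped. Resetting s->offset in between matters because the first pass may have moved the free pointer outside the object for poisoning, and calculate_sizes() only rewrites that field on the paths that need it; a stale offset would otherwise survive into the slimmed-down layout.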
@@ -2790,6 +2825,11 @@ static s8 size_index[24] = {
         2       /* 192 */
 };
 
+static inline int size_index_elem(size_t bytes)
+{
+        return (bytes - 1) / 8;
+}
+
 static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 {
         int index;
@@ -2798,7 +2838,7 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
                 if (!size)
                         return ZERO_SIZE_PTR;
 
-                index = size_index[(size - 1) / 8];
+                index = size_index[size_index_elem(size)];
         } else
                 index = fls(size - 1);
 
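With the helper factored out, the small-size path reads as one mapping: requests are bucketed in 8-byte steps into size_index[24], and each entry names the kmalloc cache to use. The arithmetic alone, as a userspace sketch (table contents assumed):

        #include <stddef.h>
        #include <stdio.h>

        /* Same arithmetic as the new helper: one size_index slot per 8-byte step. */
        static int size_index_elem(size_t bytes)
        {
                return (bytes - 1) / 8;
        }

        int main(void)
        {
                /* kmalloc(1..8) -> slot 0, kmalloc(96) -> slot 11, kmalloc(192) -> slot 23 */
                printf("%d %d %d\n", size_index_elem(8),
                       size_index_elem(96), size_index_elem(192));
                return 0;
        }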
@@ -3156,10 +3196,12 @@
         slab_state = PARTIAL;
 
         /* Caches that are not of the two-to-the-power-of size */
-        if (KMALLOC_MIN_SIZE <= 64) {
+        if (KMALLOC_MIN_SIZE <= 32) {
                 create_kmalloc_cache(&kmalloc_caches[1],
                                 "kmalloc-96", 96, GFP_NOWAIT);
                 caches++;
+        }
+        if (KMALLOC_MIN_SIZE <= 64) {
                 create_kmalloc_cache(&kmalloc_caches[2],
                                 "kmalloc-192", 192, GFP_NOWAIT);
                 caches++;
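The split repairs the guard for 64-byte minimum alignment: with KMALLOC_MIN_SIZE == 64, every size is rounded up to a 64-byte multiple, so a 96-byte cache can never be handed out (96 rounds to 128), yet the old <= 64 test still created it. kmalloc-96 now requires 32-byte or finer alignment, while kmalloc-192 stays valid at 64 since 192 = 3 * 64.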
@@ -3186,17 +3228,28 @@
         BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
                 (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
 
-        for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
-                size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;
+        for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
+                int elem = size_index_elem(i);
+                if (elem >= ARRAY_SIZE(size_index))
+                        break;
+                size_index[elem] = KMALLOC_SHIFT_LOW;
+        }
 
-        if (KMALLOC_MIN_SIZE == 128) {
+        if (KMALLOC_MIN_SIZE == 64) {
+                /*
+                 * The 96 byte size cache is not used if the alignment
+                 * is 64 byte.
+                 */
+                for (i = 64 + 8; i <= 96; i += 8)
+                        size_index[size_index_elem(i)] = 7;
+        } else if (KMALLOC_MIN_SIZE == 128) {
                 /*
                  * The 192 byte sized cache is not used if the alignment
                  * is 128 byte. Redirect kmalloc to use the 256 byte cache
                  * instead.
                  */
                 for (i = 128 + 8; i <= 192; i += 8)
-                        size_index[(i - 1) / 8] = 8;
+                        size_index[size_index_elem(i)] = 8;
         }
 
         slab_state = UP;
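Worked through for the new KMALLOC_MIN_SIZE == 64 branch: sizes 72..96 occupy slots size_index_elem(72) = 8 through size_index_elem(96) = 11, and each is pointed at entry value 7, the 2^7 = 128-byte cache, mirroring how the existing 128-byte branch sends 136..192 to the 2^8 = 256-byte cache. The new ARRAY_SIZE() check in the first loop is defensive: at KMALLOC_MIN_SIZE == 256, the largest value the BUILD_BUG_ON() permits, the loop would otherwise index past the 24-entry table.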
@@ -3292,6 +3345,9 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 {
         struct kmem_cache *s;
 
+        if (WARN_ON(!name))
+                return NULL;
+
         down_write(&slub_lock);
         s = find_mergeable(size, align, flags, name, ctor);
         if (s) {
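A fail-fast guard: a NULL name would otherwise survive until the strncmp() in kmem_cache_flags() or the sysfs registration, both of which dereference it. WARN_ON() trades an oops deep inside SLUB for a backtrace that points at the buggy caller.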
@@ -4543,8 +4599,11 @@ static int sysfs_slab_add(struct kmem_cache *s)
         }
 
         err = sysfs_create_group(&s->kobj, &slab_attr_group);
-        if (err)
+        if (err) {
+                kobject_del(&s->kobj);
+                kobject_put(&s->kobj);
                 return err;
+        }
         kobject_uevent(&s->kobj, KOBJ_ADD);
         if (!unmergeable) {
                 /* Setup first alias */
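The old error path leaked a registered kobject: by this point the kobject has been initialized and added, so bailing out with a bare return left the sysfs entry in place and the reference held. The shape of the cleanup, annotated (a sketch of the pattern, not new API):

        err = sysfs_create_group(&s->kobj, &slab_attr_group);
        if (err) {
                kobject_del(&s->kobj);   /* undo the add: remove the sysfs entry */
                kobject_put(&s->kobj);   /* drop the reference taken at init */
                return err;
        }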
@@ -4726,7 +4785,7 @@ static const struct file_operations proc_slabinfo_operations = {
 
 static int __init slab_proc_init(void)
 {
-        proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
+        proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
         return 0;
 }
 module_init(slab_proc_init);
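SLUB's /proc/slabinfo has no write handler (tuning lives under /sys/kernel/slab/ instead), so advertising S_IWUSR only invited writes that had to fail; the mode drops to plain S_IRUGO (0444), and the missing spaces after the commas are fixed while at it.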