-rw-r--r--  include/linux/page-flags.h |  2
-rw-r--r--  include/linux/slab.h       |  6
-rw-r--r--  mm/slab.c                  |  2
-rw-r--r--  mm/slob.c                  |  9
-rw-r--r--  mm/slub.c                  | 86
5 files changed, 56 insertions(+), 49 deletions(-)
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 5b59f35dcb8f..6fa317801e1c 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -128,7 +128,6 @@ enum pageflags {
 
 	/* SLUB */
 	PG_slub_frozen = PG_active,
-	PG_slub_debug = PG_error,
 };
 
 #ifndef __GENERATING_BOUNDS_H
@@ -215,7 +214,6 @@ PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
 __PAGEFLAG(SlobFree, slob_free)
 
 __PAGEFLAG(SlubFrozen, slub_frozen)
-__PAGEFLAG(SlubDebug, slub_debug)
 
 /*
  * Private page markings that may be used by the filesystem that owns the page
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 49d1247cd6d9..59260e21bdf5 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -268,7 +268,8 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
  * allocator where we care about the real place the memory allocation
  * request comes from.
  */
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
+	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
 extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
 #define kmalloc_track_caller(size, flags) \
 	__kmalloc_track_caller(size, flags, _RET_IP_)
@@ -286,7 +287,8 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
  * standard allocator where we care about the real place the memory
  * allocation request comes from.
  */
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
+	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
 extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
 #define kmalloc_node_track_caller(size, flags, node) \
 	__kmalloc_node_track_caller(size, flags, node, \
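
The net effect of the two slab.h hunks above: on a CONFIG_SLAB build with CONFIG_TRACING enabled, the track-caller wrappers now resolve to the out-of-line helpers, so traces attribute the allocation to the real call site instead of the wrapper. A minimal illustrative sketch (my_strdup_gfp is a hypothetical caller, not part of this patch):

#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical wrapper, for illustration only. */
static char *my_strdup_gfp(const char *src, gfp_t gfp)
{
	size_t len = strlen(src) + 1;
	/*
	 * With the hunks above, this expands to
	 * __kmalloc_track_caller(len, gfp, _RET_IP_) on CONFIG_SLAB +
	 * CONFIG_TRACING builds as well, so the recorded allocation site
	 * is the caller of my_strdup_gfp(), not this helper.
	 */
	char *dst = kmalloc_track_caller(len, gfp);

	if (dst)
		memcpy(dst, src, len);
	return dst;
}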
diff --git a/mm/slab.c b/mm/slab.c
index 47360c3e5abd..736e497733d6 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -860,7 +860,7 @@ static void __cpuinit start_cpu_timer(int cpu)
 	 */
 	if (keventd_up() && reap_work->work.func == NULL) {
 		init_reap_node(cpu);
-		INIT_DELAYED_WORK(reap_work, cache_reap);
+		INIT_DELAYED_WORK_DEFERRABLE(reap_work, cache_reap);
 		schedule_delayed_work_on(cpu, reap_work,
 					__round_jiffies_relative(HZ, cpu));
 	}
diff --git a/mm/slob.c b/mm/slob.c
index 3f19a347dabf..d582171c8101 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -396,6 +396,7 @@ static void slob_free(void *block, int size)
 	slob_t *prev, *next, *b = (slob_t *)block;
 	slobidx_t units;
 	unsigned long flags;
+	struct list_head *slob_list;
 
 	if (unlikely(ZERO_OR_NULL_PTR(block)))
 		return;
@@ -424,7 +425,13 @@ static void slob_free(void *block, int size)
 		set_slob(b, units,
 			(void *)((unsigned long)(b +
 					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
-		set_slob_page_free(sp, &free_slob_small);
+		if (size < SLOB_BREAK1)
+			slob_list = &free_slob_small;
+		else if (size < SLOB_BREAK2)
+			slob_list = &free_slob_medium;
+		else
+			slob_list = &free_slob_large;
+		set_slob_page_free(sp, slob_list);
 		goto out;
 	}
 
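
The slob.c hunk mirrors on the free path the size-class selection that slob_alloc() already performs, so a page that becomes partially free returns to the list matching its request size. A condensed sketch of that selection (the helper name is illustrative; SLOB_BREAK1/SLOB_BREAK2 and the three free lists are the ones already defined in mm/slob.c):

/*
 * Illustrative helper only -- not part of the patch.  It restates the
 * branch added to slob_free() above: pick the free list whose size
 * class matches the original request size.
 */
static struct list_head *slob_list_for_size(int size)
{
	if (size < SLOB_BREAK1)		/* small objects */
		return &free_slob_small;
	if (size < SLOB_BREAK2)		/* medium objects */
		return &free_slob_medium;
	return &free_slob_large;	/* everything else */
}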
diff --git a/mm/slub.c b/mm/slub.c
index 7bb7940f4eee..13fffe1f0f3d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -106,11 +106,17 @@
  * the fast path and disables lockless freelists.
  */
 
+#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
+		SLAB_TRACE | SLAB_DEBUG_FREE)
+
+static inline int kmem_cache_debug(struct kmem_cache *s)
+{
 #ifdef CONFIG_SLUB_DEBUG
-#define SLABDEBUG 1
+	return unlikely(s->flags & SLAB_DEBUG_FLAGS);
 #else
-#define SLABDEBUG 0
+	return 0;
 #endif
+}
 
 /*
  * Issues still to be resolved:
@@ -161,8 +167,8 @@
 #define MAX_OBJS_PER_PAGE	65535 /* since page.objects is u16 */
 
 /* Internal SLUB flags */
-#define __OBJECT_POISON		0x80000000 /* Poison object */
-#define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */
+#define __OBJECT_POISON		0x80000000UL /* Poison object */
+#define __SYSFS_ADD_DEFERRED	0x40000000UL /* Not yet visible via sysfs */
 
 static int kmem_size = sizeof(struct kmem_cache);
 
@@ -1072,7 +1078,7 @@ static inline struct page *alloc_slab_page(gfp_t flags, int node,
 
 	flags |= __GFP_NOTRACK;
 
-	if (node == -1)
+	if (node == NUMA_NO_NODE)
 		return alloc_pages(flags, order);
 	else
 		return alloc_pages_exact_node(node, flags, order);
@@ -1156,9 +1162,6 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	inc_slabs_node(s, page_to_nid(page), page->objects);
 	page->slab = s;
 	page->flags |= 1 << PG_slab;
-	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
-			SLAB_STORE_USER | SLAB_TRACE))
-		__SetPageSlubDebug(page);
 
 	start = page_address(page);
 
@@ -1185,14 +1188,13 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	int order = compound_order(page);
 	int pages = 1 << order;
 
-	if (unlikely(SLABDEBUG && PageSlubDebug(page))) {
+	if (kmem_cache_debug(s)) {
 		void *p;
 
 		slab_pad_check(s, page);
 		for_each_object(p, s, page_address(page),
 						page->objects)
 			check_object(s, page, p, 0);
-		__ClearPageSlubDebug(page);
 	}
 
 	kmemcheck_free_shadow(page, compound_order(page));
@@ -1386,10 +1388,10 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
 {
 	struct page *page;
-	int searchnode = (node == -1) ? numa_node_id() : node;
+	int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
 
 	page = get_partial_node(get_node(s, searchnode));
-	if (page || (flags & __GFP_THISNODE))
+	if (page || node != -1)
 		return page;
 
 	return get_any_partial(s, flags);
@@ -1414,8 +1416,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 		stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
 	} else {
 		stat(s, DEACTIVATE_FULL);
-		if (SLABDEBUG && PageSlubDebug(page) &&
-			(s->flags & SLAB_STORE_USER))
+		if (kmem_cache_debug(s) && (s->flags & SLAB_STORE_USER))
 			add_full(n, page);
 	}
 	slab_unlock(page);
@@ -1514,7 +1515,7 @@ static void flush_all(struct kmem_cache *s)
 static inline int node_match(struct kmem_cache_cpu *c, int node)
 {
 #ifdef CONFIG_NUMA
-	if (node != -1 && c->node != node)
+	if (node != NUMA_NO_NODE && c->node != node)
 		return 0;
 #endif
 	return 1;
@@ -1623,7 +1624,7 @@ load_freelist:
 	object = c->page->freelist;
 	if (unlikely(!object))
 		goto another_slab;
-	if (unlikely(SLABDEBUG && PageSlubDebug(c->page)))
+	if (kmem_cache_debug(s))
 		goto debug;
 
 	c->freelist = get_freepointer(s, object);
@@ -1726,7 +1727,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
 
 	trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags);
 
@@ -1737,7 +1738,7 @@ EXPORT_SYMBOL(kmem_cache_alloc);
 #ifdef CONFIG_TRACING
 void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
 {
-	return slab_alloc(s, gfpflags, -1, _RET_IP_);
+	return slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_alloc_notrace);
 #endif
@@ -1782,7 +1783,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	stat(s, FREE_SLOWPATH);
 	slab_lock(page);
 
-	if (unlikely(SLABDEBUG && PageSlubDebug(page)))
+	if (kmem_cache_debug(s))
 		goto debug;
 
 checks_ok:
@@ -2489,7 +2490,6 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	s->refcount--;
 	if (!s->refcount) {
 		list_del(&s->list);
-		up_write(&slub_lock);
 		if (kmem_cache_close(s)) {
 			printk(KERN_ERR "SLUB %s: %s called for cache that "
 				"still has objects.\n", s->name, __func__);
@@ -2498,8 +2498,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
 		if (s->flags & SLAB_DESTROY_BY_RCU)
 			rcu_barrier();
 		sysfs_slab_remove(s);
-	} else
-		up_write(&slub_lock);
+	}
+	up_write(&slub_lock);
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
 
@@ -2727,7 +2727,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	ret = slab_alloc(s, flags, -1, _RET_IP_);
+	ret = slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_);
 
 	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
 
@@ -3117,9 +3117,12 @@ void __init kmem_cache_init(void)
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
-	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
-		kmalloc_caches[i]. name =
-			kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
+	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
+		char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
+
+		BUG_ON(!s);
+		kmalloc_caches[i].name = s;
+	}
 
 #ifdef CONFIG_SMP
 	register_cpu_notifier(&slab_notifier);
@@ -3222,14 +3225,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		 */
 		s->objsize = max(s->objsize, (int)size);
 		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
-		up_write(&slub_lock);
 
 		if (sysfs_slab_alias(s, name)) {
-			down_write(&slub_lock);
 			s->refcount--;
-			up_write(&slub_lock);
 			goto err;
 		}
+		up_write(&slub_lock);
 		return s;
 	}
 
@@ -3238,14 +3239,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		if (kmem_cache_open(s, GFP_KERNEL, name,
 				size, align, flags, ctor)) {
 			list_add(&s->list, &slab_caches);
-			up_write(&slub_lock);
 			if (sysfs_slab_add(s)) {
-				down_write(&slub_lock);
 				list_del(&s->list);
-				up_write(&slub_lock);
 				kfree(s);
 				goto err;
 			}
+			up_write(&slub_lock);
 			return s;
 		}
 		kfree(s);
@@ -3311,7 +3310,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	ret = slab_alloc(s, gfpflags, -1, caller);
+	ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, caller);
 
 	/* Honor the call site pointer we recieved. */
 	trace_kmalloc(caller, ret, size, s->size, gfpflags);
@@ -3394,16 +3393,6 @@ static void validate_slab_slab(struct kmem_cache *s, struct page *page,
 	} else
 		printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
 			s->name, page);
-
-	if (s->flags & DEBUG_DEFAULT_FLAGS) {
-		if (!PageSlubDebug(page))
-			printk(KERN_ERR "SLUB %s: SlubDebug not set "
-				"on slab 0x%p\n", s->name, page);
-	} else {
-		if (PageSlubDebug(page))
-			printk(KERN_ERR "SLUB %s: SlubDebug set on "
-				"slab 0x%p\n", s->name, page);
-	}
 }
 
 static int validate_slab_node(struct kmem_cache *s,
@@ -4503,6 +4492,13 @@ static int sysfs_slab_add(struct kmem_cache *s)
 
 static void sysfs_slab_remove(struct kmem_cache *s)
 {
+	if (slab_state < SYSFS)
+		/*
+		 * Sysfs has not been setup yet so no need to remove the
+		 * cache from sysfs.
+		 */
+		return;
+
 	kobject_uevent(&s->kobj, KOBJ_REMOVE);
 	kobject_del(&s->kobj);
 	kobject_put(&s->kobj);
@@ -4548,8 +4544,11 @@ static int __init slab_sysfs_init(void)
 	struct kmem_cache *s;
 	int err;
 
+	down_write(&slub_lock);
+
 	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
 	if (!slab_kset) {
+		up_write(&slub_lock);
 		printk(KERN_ERR "Cannot register slab subsystem.\n");
 		return -ENOSYS;
 	}
@@ -4574,6 +4573,7 @@ static int __init slab_sysfs_init(void)
 		kfree(al);
 	}
 
+	up_write(&slub_lock);
 	resiliency_test();
 	return 0;
 }
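
Taken together, the slub.c hunks drop the per-page PG_slub_debug bit and key all debug handling off the cache's own flags. A minimal sketch of the resulting pattern (example_fastpath_check is a hypothetical illustration, not code from this patch):

/*
 * kmem_cache_debug() as introduced above: the decision is made per
 * cache, so nothing has to set a page flag in new_slab() or clear it
 * in __free_slab().
 */
static inline int kmem_cache_debug(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_DEBUG
	return unlikely(s->flags & SLAB_DEBUG_FLAGS);
#else
	return 0;
#endif
}

/* Hypothetical caller showing the check used at the alloc/free sites. */
static void *example_fastpath_check(struct kmem_cache *s, struct page *page)
{
	if (kmem_cache_debug(s))	/* was: SLABDEBUG && PageSlubDebug(page) */
		return NULL;		/* take the debug slow path instead */
	return page->freelist;
}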