Diffstat (limited to 'mm/slub.c')

 mm/slub.c | 86 +++++++++++++++++++++++++++++---------------------------
 1 file changed, 43 insertions(+), 43 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 578f68f3c51f..fba51d6d4cc4 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -107,11 +107,17 @@
  * the fast path and disables lockless freelists.
  */
 
+#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
+                SLAB_TRACE | SLAB_DEBUG_FREE)
+
+static inline int kmem_cache_debug(struct kmem_cache *s)
+{
 #ifdef CONFIG_SLUB_DEBUG
-#define SLABDEBUG 1
+        return unlikely(s->flags & SLAB_DEBUG_FLAGS);
 #else
-#define SLABDEBUG 0
+        return 0;
 #endif
+}
 
 /*
  * Issues still to be resolved:
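
The new helper folds the old compile-time SLABDEBUG constant and the per-page PageSlubDebug() flag into a single test on the cache's flag word, with the unlikely() hint moving inside the helper. Every call site converted in the hunks below follows the same before/after pattern, shown here for reference (both forms taken from this diff):

        /* old: compile-time constant plus a per-page state flag */
        if (unlikely(SLABDEBUG && PageSlubDebug(page)))
                goto debug;

        /* new: one inline helper keyed off s->flags */
        if (kmem_cache_debug(s))
                goto debug;
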
@@ -162,8 +168,8 @@
 #define MAX_OBJS_PER_PAGE       65535 /* since page.objects is u16 */
 
 /* Internal SLUB flags */
-#define __OBJECT_POISON         0x80000000 /* Poison object */
-#define __SYSFS_ADD_DEFERRED    0x40000000 /* Not yet visible via sysfs */
+#define __OBJECT_POISON         0x80000000UL /* Poison object */
+#define __SYSFS_ADD_DEFERRED    0x40000000UL /* Not yet visible via sysfs */
 
 static int kmem_size = sizeof(struct kmem_cache);
 
@@ -1073,7 +1079,7 @@ static inline struct page *alloc_slab_page(gfp_t flags, int node,
 
         flags |= __GFP_NOTRACK;
 
-        if (node == -1)
+        if (node == NUMA_NO_NODE)
                 return alloc_pages(flags, order);
         else
                 return alloc_pages_exact_node(node, flags, order);
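
NUMA_NO_NODE is the kernel's named sentinel for "no node preference"; assuming the contemporary definition in include/linux/numa.h, the -1 substitutions throughout this diff are cosmetic rather than behavioural:

        /* include/linux/numa.h: same value as the old literal, so only
         * the readability changes */
        #define NUMA_NO_NODE    (-1)
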
@@ -1157,9 +1163,6 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
         inc_slabs_node(s, page_to_nid(page), page->objects);
         page->slab = s;
         page->flags |= 1 << PG_slab;
-        if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
-                        SLAB_STORE_USER | SLAB_TRACE))
-                __SetPageSlubDebug(page);
 
         start = page_address(page);
 
@@ -1186,14 +1189,13 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
         int order = compound_order(page);
         int pages = 1 << order;
 
-        if (unlikely(SLABDEBUG && PageSlubDebug(page))) {
+        if (kmem_cache_debug(s)) {
                 void *p;
 
                 slab_pad_check(s, page);
                 for_each_object(p, s, page_address(page),
                                                 page->objects)
                         check_object(s, page, p, 0);
-                __ClearPageSlubDebug(page);
         }
 
         kmemcheck_free_shadow(page, compound_order(page));
@@ -1387,10 +1389,10 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
 {
         struct page *page;
-        int searchnode = (node == -1) ? numa_node_id() : node;
+        int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
 
         page = get_partial_node(get_node(s, searchnode));
-        if (page || (flags & __GFP_THISNODE))
+        if (page || node != -1)
                 return page;
 
         return get_any_partial(s, flags);
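
Note the semantic change in get_partial()'s second test: fallback to other nodes via get_any_partial() is now suppressed whenever the caller named an explicit node, not only when the gfp mask carried __GFP_THISNODE (the new test still spells the sentinel as a bare -1). Hypothetical calls, for illustration only:

        get_partial(s, flags, NUMA_NO_NODE);    /* local node, then any node */
        get_partial(s, flags, 2);               /* node 2 only; no fallback even
                                                 * without __GFP_THISNODE */
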
@@ -1415,8 +1417,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
                 stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
         } else {
                 stat(s, DEACTIVATE_FULL);
-                if (SLABDEBUG && PageSlubDebug(page) &&
-                        (s->flags & SLAB_STORE_USER))
+                if (kmem_cache_debug(s) && (s->flags & SLAB_STORE_USER))
                         add_full(n, page);
         }
         slab_unlock(page);
@@ -1515,7 +1516,7 @@ static void flush_all(struct kmem_cache *s)
 static inline int node_match(struct kmem_cache_cpu *c, int node)
 {
 #ifdef CONFIG_NUMA
-        if (node != -1 && c->node != node)
+        if (node != NUMA_NO_NODE && c->node != node)
                 return 0;
 #endif
         return 1;
@@ -1624,7 +1625,7 @@ load_freelist:
         object = c->page->freelist;
         if (unlikely(!object))
                 goto another_slab;
-        if (unlikely(SLABDEBUG && PageSlubDebug(c->page)))
+        if (kmem_cache_debug(s))
                 goto debug;
 
         c->freelist = get_freepointer(s, object);
@@ -1727,7 +1728,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-        void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);
+        void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
 
         trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags);
 
@@ -1738,7 +1739,7 @@ EXPORT_SYMBOL(kmem_cache_alloc);
 #ifdef CONFIG_TRACING
 void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
 {
-        return slab_alloc(s, gfpflags, -1, _RET_IP_);
+        return slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_alloc_notrace);
 #endif
@@ -1783,7 +1784,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
         stat(s, FREE_SLOWPATH);
         slab_lock(page);
 
-        if (unlikely(SLABDEBUG && PageSlubDebug(page)))
+        if (kmem_cache_debug(s))
                 goto debug;
 
 checks_ok:
@@ -2490,7 +2491,6 @@ void kmem_cache_destroy(struct kmem_cache *s)
         s->refcount--;
         if (!s->refcount) {
                 list_del(&s->list);
-                up_write(&slub_lock);
                 if (kmem_cache_close(s)) {
                         printk(KERN_ERR "SLUB %s: %s called for cache that "
                                 "still has objects.\n", s->name, __func__);
@@ -2499,8 +2499,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
                 if (s->flags & SLAB_DESTROY_BY_RCU)
                         rcu_barrier();
                 sysfs_slab_remove(s);
-        } else
-                up_write(&slub_lock);
+        }
+        up_write(&slub_lock);
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
 
@@ -2728,7 +2728,7 @@ void *__kmalloc(size_t size, gfp_t flags)
         if (unlikely(ZERO_OR_NULL_PTR(s)))
                 return s;
 
-        ret = slab_alloc(s, flags, -1, _RET_IP_);
+        ret = slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_);
 
         trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
 
@@ -3118,9 +3118,12 @@ void __init kmem_cache_init(void)
         slab_state = UP;
 
         /* Provide the correct kmalloc names now that the caches are up */
-        for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
-                kmalloc_caches[i]. name =
-                        kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
+        for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
+                char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
+
+                BUG_ON(!s);
+                kmalloc_caches[i].name = s;
+        }
 
 #ifdef CONFIG_SMP
         register_cpu_notifier(&slab_notifier);
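
kasprintf() combines allocation and formatting and returns NULL on allocation failure, so the old loop could silently leave a kmalloc cache with a NULL name. A minimal sketch of the new pattern (the local variable name here is illustrative):

        /* GFP_NOWAIT: early init must not sleep.  There is no sane recovery
         * from a nameless kmalloc cache this early in boot, so a failed
         * allocation halts with BUG_ON(). */
        char *name = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
        BUG_ON(!name);
        kmalloc_caches[i].name = name;
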
@@ -3223,14 +3226,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
                  */
                 s->objsize = max(s->objsize, (int)size);
                 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
-                up_write(&slub_lock);
 
                 if (sysfs_slab_alias(s, name)) {
-                        down_write(&slub_lock);
                         s->refcount--;
-                        up_write(&slub_lock);
                         goto err;
                 }
+                up_write(&slub_lock);
                 return s;
         }
 
@@ -3239,14 +3240,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
                 if (kmem_cache_open(s, GFP_KERNEL, name,
                                 size, align, flags, ctor)) {
                         list_add(&s->list, &slab_caches);
-                        up_write(&slub_lock);
                         if (sysfs_slab_add(s)) {
-                                down_write(&slub_lock);
                                 list_del(&s->list);
-                                up_write(&slub_lock);
                                 kfree(s);
                                 goto err;
                         }
+                        up_write(&slub_lock);
                         return s;
                 }
                 kfree(s);
@@ -3312,7 +3311,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
         if (unlikely(ZERO_OR_NULL_PTR(s)))
                 return s;
 
-        ret = slab_alloc(s, gfpflags, -1, caller);
+        ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, caller);
 
         /* Honor the call site pointer we recieved. */
         trace_kmalloc(caller, ret, size, s->size, gfpflags);
@@ -3395,16 +3394,6 @@ static void validate_slab_slab(struct kmem_cache *s, struct page *page,
         } else
                 printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
                         s->name, page);
-
-        if (s->flags & DEBUG_DEFAULT_FLAGS) {
-                if (!PageSlubDebug(page))
-                        printk(KERN_ERR "SLUB %s: SlubDebug not set "
-                                "on slab 0x%p\n", s->name, page);
-        } else {
-                if (PageSlubDebug(page))
-                        printk(KERN_ERR "SLUB %s: SlubDebug set on "
-                                "slab 0x%p\n", s->name, page);
-        }
 }
 
 static int validate_slab_node(struct kmem_cache *s,
@@ -4504,6 +4493,13 @@ static int sysfs_slab_add(struct kmem_cache *s)
 
 static void sysfs_slab_remove(struct kmem_cache *s)
 {
+        if (slab_state < SYSFS)
+                /*
+                 * Sysfs has not been setup yet so no need to remove the
+                 * cache from sysfs.
+                 */
+                return;
+
         kobject_uevent(&s->kobj, KOBJ_REMOVE);
         kobject_del(&s->kobj);
         kobject_put(&s->kobj);
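
The slab_state test leans on slub.c's boot-progress variable, declared earlier in this file roughly as follows (a sketch from the surrounding source, not part of this diff; comments paraphrased):

        static enum {
                DOWN,           /* no slab functionality yet */
                PARTIAL,        /* kmem_cache_open() works, sysfs does not */
                UP,             /* caches usable but not yet in sysfs */
                SYSFS           /* sysfs fully set up */
        } slab_state = DOWN;

A cache destroyed before slab_sysfs_init() promotes the state to SYSFS was never registered, so there is nothing to tear down.
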
@@ -4549,8 +4545,11 @@ static int __init slab_sysfs_init(void)
         struct kmem_cache *s;
         int err;
 
+        down_write(&slub_lock);
+
         slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
         if (!slab_kset) {
+                up_write(&slub_lock);
                 printk(KERN_ERR "Cannot register slab subsystem.\n");
                 return -ENOSYS;
         }
@@ -4575,6 +4574,7 @@ static int __init slab_sysfs_init(void)
                 kfree(al);
         }
 
+        up_write(&slub_lock);
         resiliency_test();
         return 0;
 }
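
Taken together, the slub_lock hunks widen the lock's coverage so that cache-list manipulation and sysfs registration happen atomically: kmem_cache_create() and kmem_cache_destroy() now hold the semaphore across sysfs_slab_add()/sysfs_slab_remove(), and slab_sysfs_init() holds it for the whole registration walk. The resulting shape of kmem_cache_destroy(), reconstructed from the two hunks above as a sketch (the down_write() and dump_stack() lines fall outside the hunks' context and are assumptions about the surrounding source):

        void kmem_cache_destroy(struct kmem_cache *s)
        {
                down_write(&slub_lock);
                s->refcount--;
                if (!s->refcount) {
                        list_del(&s->list);
                        if (kmem_cache_close(s)) {
                                printk(KERN_ERR "SLUB %s: %s called for cache that "
                                        "still has objects.\n", s->name, __func__);
                                dump_stack();
                        }
                        if (s->flags & SLAB_DESTROY_BY_RCU)
                                rcu_barrier();
                        sysfs_slab_remove(s);
                }
                up_write(&slub_lock);
        }
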