author      Linus Torvalds <torvalds@linux-foundation.org>  2010-08-06 14:44:08 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>  2010-08-06 14:44:08 -0400
commit      b57bdda58cda0aaf6def042d101dd85977a286ed (patch)
tree        7f63afddb8275d67214d7a89cfc8a65815e79d42 /mm
parent      cc41f5cede3c63836d1c0958204630b07f5b5ee7 (diff)
parent      415cb47998c54195710d413c3d95e37a9339c1e8 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
slub: Allow removal of slab caches during boot
Revert "slub: Allow removal of slab caches during boot"
slub numa: Fix rare allocation from unexpected node
slab: use deferable timers for its periodic housekeeping
slub: Use kmem_cache flags to detect if slab is in debugging mode.
slub: Allow removal of slab caches during boot
slub: Check kasprintf results in kmem_cache_init()
SLUB: Constants need UL
slub: Use a constant for a unspecified node.
SLOB: Free objects to their own list
slab: fix caller tracking on !CONFIG_DEBUG_SLAB && CONFIG_TRACING
Diffstat (limited to 'mm')

-rw-r--r--  mm/slab.c |  2
-rw-r--r--  mm/slob.c |  9
-rw-r--r--  mm/slub.c | 86

3 files changed, 52 insertions(+), 45 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -860,7 +860,7 @@ static void __cpuinit start_cpu_timer(int cpu)
 	 */
 	if (keventd_up() && reap_work->work.func == NULL) {
 		init_reap_node(cpu);
-		INIT_DELAYED_WORK(reap_work, cache_reap);
+		INIT_DELAYED_WORK_DEFERRABLE(reap_work, cache_reap);
 		schedule_delayed_work_on(cpu, reap_work,
 					__round_jiffies_relative(HZ, cpu));
 	}
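The slab.c change converts cache_reap's periodic timer to a deferrable work item: an idle, tickless CPU is no longer woken just to trim per-CPU caches; the work simply runs the next time the CPU wakes for some other reason. A minimal module-style sketch of the same pattern, using the 2.6.x-era API (later kernels spell the macro INIT_DEFERRABLE_WORK); all names in the sketch are illustrative:

/* Sketch: power-friendly periodic housekeeping via a deferrable
 * delayed work item, modelled on start_cpu_timer()/cache_reap().
 * Build against a contemporary 2.6.3x tree. */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct delayed_work housekeep_work;

static void housekeep(struct work_struct *w)
{
	/* ... trim caches, then re-arm for the next interval ... */
	schedule_delayed_work(&housekeep_work, round_jiffies_relative(HZ));
}

static int __init housekeep_init(void)
{
	/* Deferrable: an idle, tickless CPU is not woken just for this. */
	INIT_DELAYED_WORK_DEFERRABLE(&housekeep_work, housekeep);
	schedule_delayed_work(&housekeep_work, round_jiffies_relative(HZ));
	return 0;
}

static void __exit housekeep_exit(void)
{
	cancel_delayed_work_sync(&housekeep_work);
}

module_init(housekeep_init);
module_exit(housekeep_exit);
MODULE_LICENSE("GPL");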
diff --git a/mm/slob.c b/mm/slob.c
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -396,6 +396,7 @@ static void slob_free(void *block, int size)
 	slob_t *prev, *next, *b = (slob_t *)block;
 	slobidx_t units;
 	unsigned long flags;
+	struct list_head *slob_list;
 
 	if (unlikely(ZERO_OR_NULL_PTR(block)))
 		return;
@@ -424,7 +425,13 @@ static void slob_free(void *block, int size)
 		set_slob(b, units,
 			(void *)((unsigned long)(b +
 					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
-		set_slob_page_free(sp, &free_slob_small);
+		if (size < SLOB_BREAK1)
+			slob_list = &free_slob_small;
+		else if (size < SLOB_BREAK2)
+			slob_list = &free_slob_medium;
+		else
+			slob_list = &free_slob_large;
+		set_slob_page_free(sp, slob_list);
 		goto out;
 	}
 
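Previously, a page that became fully free inside slob_free() was always put back on free_slob_small, even if it had been serving the medium or large size class, polluting the small list with pages of the wrong class. The fix reuses the same break points that slob_alloc() already dispatches on. A standalone sketch of that dispatch, assuming the era's break values of 256 and 1024 bytes from mm/slob.c:

/* Sketch: SLOB's three size classes, selected the way slob_free()
 * now does it. */
#include <stdio.h>

#define SLOB_BREAK1 256
#define SLOB_BREAK2 1024

static const char *slob_list_for(int size)
{
	if (size < SLOB_BREAK1)
		return "free_slob_small";
	else if (size < SLOB_BREAK2)
		return "free_slob_medium";
	else
		return "free_slob_large";
}

int main(void)
{
	int sizes[] = { 32, 256, 700, 1024, 4000 };

	for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%4d bytes -> %s\n", sizes[i], slob_list_for(sizes[i]));
	return 0;
}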
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -106,11 +106,17 @@
  * the fast path and disables lockless freelists.
  */
 
+#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
+		SLAB_TRACE | SLAB_DEBUG_FREE)
+
+static inline int kmem_cache_debug(struct kmem_cache *s)
+{
 #ifdef CONFIG_SLUB_DEBUG
-#define SLABDEBUG 1
+	return unlikely(s->flags & SLAB_DEBUG_FLAGS);
 #else
-#define SLABDEBUG 0
+	return 0;
 #endif
+}
 
 /*
  * Issues still to be resolved:
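This replaces the old scheme, where debug state was tracked per page via the PageSlubDebug bit plus a compile-time SLABDEBUG constant, with a single per-cache predicate over s->flags; the later hunks in this file swap every PageSlubDebug test for it. A standalone sketch of the predicate (the flag values mirror the era's slab.h, but treat them as illustrative):

/* Sketch: debug state read straight from the cache's flags word, as
 * kmem_cache_debug() now does, with no per-page bit to keep in sync. */
#include <stdio.h>

#define SLAB_DEBUG_FREE  0x00000100UL
#define SLAB_RED_ZONE    0x00000400UL
#define SLAB_POISON      0x00000800UL
#define SLAB_STORE_USER  0x00010000UL
#define SLAB_TRACE       0x00200000UL

#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_DEBUG_FREE)

struct kmem_cache { unsigned long flags; };

static inline int kmem_cache_debug(struct kmem_cache *s)
{
	return (s->flags & SLAB_DEBUG_FLAGS) != 0;
}

int main(void)
{
	struct kmem_cache plain = { .flags = 0 };
	struct kmem_cache dbg   = { .flags = SLAB_POISON | SLAB_RED_ZONE };

	printf("plain: %d, debug: %d\n",
	       kmem_cache_debug(&plain), kmem_cache_debug(&dbg));
	return 0;
}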
@@ -161,8 +167,8 @@
 #define MAX_OBJS_PER_PAGE	65535 /* since page.objects is u16 */
 
 /* Internal SLUB flags */
-#define __OBJECT_POISON		0x80000000 /* Poison object */
-#define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */
+#define __OBJECT_POISON		0x80000000UL /* Poison object */
+#define __SYSFS_ADD_DEFERRED	0x40000000UL /* Not yet visible via sysfs */
 
 static int kmem_size = sizeof(struct kmem_cache);
 
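Without the UL suffix, 0x80000000 has type unsigned int, so on an LP64 target ~__OBJECT_POISON is computed in 32 bits and then zero-extended, silently clearing the upper half of an unsigned long flags word. A small demonstration of that classic failure mode (one plausible motivation; the commit title itself just says the constants need UL):

/* Sketch: why "SLUB: Constants need UL" matters on LP64.  Clearing a
 * bit with ~CONST only behaves when CONST is as wide as the flags. */
#include <stdio.h>

#define POISON_INT 0x80000000    /* unsigned int on LP64 */
#define POISON_UL  0x80000000UL  /* unsigned long */

int main(void)
{
	unsigned long flags = 0xAAAA00000000UL | POISON_UL;

	/* ~POISON_INT is a 32-bit value, zero-extended: the upper half
	 * of 'flags' is wiped along with the poison bit. */
	printf("int mask: %#lx\n", flags & ~POISON_INT);
	/* ~POISON_UL keeps the upper bits intact. */
	printf("ul  mask: %#lx\n", flags & ~POISON_UL);
	return 0;
}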
@@ -1072,7 +1078,7 @@ static inline struct page *alloc_slab_page(gfp_t flags, int node,
 
 	flags |= __GFP_NOTRACK;
 
-	if (node == -1)
+	if (node == NUMA_NO_NODE)
 		return alloc_pages(flags, order);
 	else
 		return alloc_pages_exact_node(node, flags, order);
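NUMA_NO_NODE is simply the named form of the -1 sentinel (it is defined as -1 in <linux/numa.h>), and the same substitution recurs through the rest of this patch. A trivial standalone sketch of the dispatch:

/* Sketch: replacing the magic -1 with the named sentinel. */
#include <stdio.h>

#define NUMA_NO_NODE (-1)

static const char *alloc_path(int node)
{
	if (node == NUMA_NO_NODE)
		return "alloc_pages (no node preference)";
	return "alloc_pages_exact_node (caller-specified node)";
}

int main(void)
{
	printf("node=-1: %s\n", alloc_path(NUMA_NO_NODE));
	printf("node= 0: %s\n", alloc_path(0));
	return 0;
}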
@@ -1156,9 +1162,6 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	inc_slabs_node(s, page_to_nid(page), page->objects);
 	page->slab = s;
 	page->flags |= 1 << PG_slab;
-	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
-			SLAB_STORE_USER | SLAB_TRACE))
-		__SetPageSlubDebug(page);
 
 	start = page_address(page);
 
@@ -1185,14 +1188,13 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	int order = compound_order(page);
 	int pages = 1 << order;
 
-	if (unlikely(SLABDEBUG && PageSlubDebug(page))) {
+	if (kmem_cache_debug(s)) {
 		void *p;
 
 		slab_pad_check(s, page);
 		for_each_object(p, s, page_address(page),
 						page->objects)
 			check_object(s, page, p, 0);
-		__ClearPageSlubDebug(page);
 	}
 
 	kmemcheck_free_shadow(page, compound_order(page));
@@ -1386,10 +1388,10 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
 {
 	struct page *page;
-	int searchnode = (node == -1) ? numa_node_id() : node;
+	int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
 
 	page = get_partial_node(get_node(s, searchnode));
-	if (page || (flags & __GFP_THISNODE))
+	if (page || node != -1)
 		return page;
 
 	return get_any_partial(s, flags);
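This is the "rare allocation from unexpected node" fix: previously, when the requested node's partial lists were empty, get_partial() fell back to any node unless the caller happened to pass __GFP_THISNODE, so an explicit kmem_cache_alloc_node() request could be satisfied from the wrong node. Now any explicit node request skips the fallback. (The literal -1 on the new side, rather than NUMA_NO_NODE, is how the merged code reads; the two patches crossed.) A sketch of the before/after decision, with an illustrative flag value:

/* Sketch: the fallback rule before and after the fix.  'found' models
 * whether the requested node's partial list yielded a page. */
#include <stdio.h>
#include <stdbool.h>

#define NUMA_NO_NODE (-1)
#define __GFP_THISNODE 0x40000u   /* illustrative bit */

static bool may_fall_back_old(int node, unsigned int flags, bool found)
{
	(void)node;                   /* the requested node was ignored */
	return !found && !(flags & __GFP_THISNODE);
}

static bool may_fall_back_new(int node, unsigned int flags, bool found)
{
	(void)flags;                  /* gfp flags no longer consulted */
	return !found && node == NUMA_NO_NODE;
}

int main(void)
{
	/* Explicit node request, no __GFP_THISNODE, nothing found locally. */
	printf("old: fallback=%d (may allocate from the wrong node)\n",
	       may_fall_back_old(1, 0, false));
	printf("new: fallback=%d (honours the requested node)\n",
	       may_fall_back_new(1, 0, false));
	return 0;
}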
@@ -1414,8 +1416,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 		stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
 	} else {
 		stat(s, DEACTIVATE_FULL);
-		if (SLABDEBUG && PageSlubDebug(page) &&
-				(s->flags & SLAB_STORE_USER))
+		if (kmem_cache_debug(s) && (s->flags & SLAB_STORE_USER))
 			add_full(n, page);
 	}
 	slab_unlock(page);
@@ -1514,7 +1515,7 @@ static void flush_all(struct kmem_cache *s)
 static inline int node_match(struct kmem_cache_cpu *c, int node)
 {
 #ifdef CONFIG_NUMA
-	if (node != -1 && c->node != node)
+	if (node != NUMA_NO_NODE && c->node != node)
 		return 0;
 #endif
 	return 1;
@@ -1623,7 +1624,7 @@ load_freelist:
 	object = c->page->freelist;
 	if (unlikely(!object))
 		goto another_slab;
-	if (unlikely(SLABDEBUG && PageSlubDebug(c->page)))
+	if (kmem_cache_debug(s))
 		goto debug;
 
 	c->freelist = get_freepointer(s, object);
@@ -1726,7 +1727,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
 
 	trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags);
 
@@ -1737,7 +1738,7 @@ EXPORT_SYMBOL(kmem_cache_alloc);
 #ifdef CONFIG_TRACING
 void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
 {
-	return slab_alloc(s, gfpflags, -1, _RET_IP_);
+	return slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_alloc_notrace);
 #endif
@@ -1782,7 +1783,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	stat(s, FREE_SLOWPATH);
 	slab_lock(page);
 
-	if (unlikely(SLABDEBUG && PageSlubDebug(page)))
+	if (kmem_cache_debug(s))
 		goto debug;
 
 checks_ok:
@@ -2489,7 +2490,6 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	s->refcount--;
 	if (!s->refcount) {
 		list_del(&s->list);
-		up_write(&slub_lock);
 		if (kmem_cache_close(s)) {
 			printk(KERN_ERR "SLUB %s: %s called for cache that "
 				"still has objects.\n", s->name, __func__);
@@ -2498,8 +2498,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
 		if (s->flags & SLAB_DESTROY_BY_RCU)
 			rcu_barrier();
 		sysfs_slab_remove(s);
-	} else
-		up_write(&slub_lock);
+	}
+	up_write(&slub_lock);
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
 
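Together with the hunk above, this makes kmem_cache_destroy() hold slub_lock across the entire teardown, including kmem_cache_close() and the sysfs removal, instead of dropping it right after unlinking the cache from the list; that is part of what allows slab caches to be removed safely during boot. A userspace analogue of the locking shape, using pthreads (registry and cache names are illustrative):

/* Sketch: hold the writer lock across the whole teardown so no
 * concurrent creator or destroyer ever observes a half-dead cache. */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t registry_lock = PTHREAD_RWLOCK_INITIALIZER;
static int cache_registered = 1;

static void destroy_cache(void)
{
	pthread_rwlock_wrlock(&registry_lock);
	cache_registered = 0;   /* list_del(&s->list) analogue */
	/* Close and sysfs removal happen while the lock is still held. */
	puts("cache closed and unregistered atomically");
	pthread_rwlock_unlock(&registry_lock);
}

int main(void)
{
	destroy_cache();
	return 0;
}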
@@ -2727,7 +2727,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	ret = slab_alloc(s, flags, -1, _RET_IP_);
+	ret = slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_);
 
 	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
 
@@ -3117,9 +3117,12 @@ void __init kmem_cache_init(void)
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
-	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
-		kmalloc_caches[i]. name =
-			kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
+	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
+		char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
+
+		BUG_ON(!s);
+		kmalloc_caches[i].name = s;
+	}
 
 #ifdef CONFIG_SMP
 	register_cpu_notifier(&slab_notifier);
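kasprintf() returns NULL when its GFP_NOWAIT allocation fails, and the old loop would then silently install a NULL cache name; this early in boot, BUG_ON() is the pragmatic response. The same format-then-check pattern in portable C, using glibc's asprintf(), which reports failure as -1 rather than through the pointer:

/* Sketch: never trust an allocating formatter's result unchecked. */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>

int main(void)
{
	for (int shift = 3; shift < 6; shift++) {
		char *name;

		/* kasprintf() analogue; abort on failure like BUG_ON(). */
		int rc = asprintf(&name, "kmalloc-%d", 1 << shift);
		assert(rc >= 0);

		printf("%s\n", name);
		free(name);
	}
	return 0;
}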
@@ -3222,14 +3225,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		 */
 		s->objsize = max(s->objsize, (int)size);
 		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
-		up_write(&slub_lock);
 
 		if (sysfs_slab_alias(s, name)) {
-			down_write(&slub_lock);
 			s->refcount--;
-			up_write(&slub_lock);
 			goto err;
 		}
+		up_write(&slub_lock);
 		return s;
 	}
 
@@ -3238,14 +3239,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		if (kmem_cache_open(s, GFP_KERNEL, name,
 				size, align, flags, ctor)) {
 			list_add(&s->list, &slab_caches);
-			up_write(&slub_lock);
 			if (sysfs_slab_add(s)) {
-				down_write(&slub_lock);
 				list_del(&s->list);
-				up_write(&slub_lock);
 				kfree(s);
 				goto err;
 			}
+			up_write(&slub_lock);
 			return s;
 		}
 		kfree(s);
@@ -3311,7 +3310,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	ret = slab_alloc(s, gfpflags, -1, caller);
+	ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, caller);
 
 	/* Honor the call site pointer we recieved. */
 	trace_kmalloc(caller, ret, size, s->size, gfpflags);
@@ -3394,16 +3393,6 @@ static void validate_slab_slab(struct kmem_cache *s, struct page *page,
 	} else
 		printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
 			s->name, page);
-
-	if (s->flags & DEBUG_DEFAULT_FLAGS) {
-		if (!PageSlubDebug(page))
-			printk(KERN_ERR "SLUB %s: SlubDebug not set "
-				"on slab 0x%p\n", s->name, page);
-	} else {
-		if (PageSlubDebug(page))
-			printk(KERN_ERR "SLUB %s: SlubDebug set on "
-				"slab 0x%p\n", s->name, page);
-	}
 }
 
 static int validate_slab_node(struct kmem_cache *s,
@@ -4503,6 +4492,13 @@ static int sysfs_slab_add(struct kmem_cache *s)
 
 static void sysfs_slab_remove(struct kmem_cache *s)
 {
+	if (slab_state < SYSFS)
+		/*
+		 * Sysfs has not been setup yet so no need to remove the
+		 * cache from sysfs.
+		 */
+		return;
+
 	kobject_uevent(&s->kobj, KOBJ_REMOVE);
 	kobject_del(&s->kobj);
 	kobject_put(&s->kobj);
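With caches now destroyable before slab_sysfs_init() has run, sysfs_slab_remove() must not touch a kobject that was never registered; the new guard keys off how far slab_state has progressed. A standalone sketch of state-gated teardown (the state names mirror slub.c's boot-state enum, but treat them as illustrative):

/* Sketch: skip teardown of a facility that was never brought up. */
#include <stdio.h>

enum slab_state { DOWN, PARTIAL, UP, SYSFS };
static enum slab_state slab_state = UP;   /* sysfs not initialised yet */

static void sysfs_remove(const char *name)
{
	if (slab_state < SYSFS) {
		/* Nothing was ever registered; just return. */
		printf("%s: skipped, sysfs not up yet\n", name);
		return;
	}
	printf("%s: kobject removed\n", name);
}

int main(void)
{
	sysfs_remove("kmalloc-64");   /* before sysfs init */
	slab_state = SYSFS;
	sysfs_remove("kmalloc-64");   /* after sysfs init  */
	return 0;
}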
@@ -4548,8 +4544,11 @@ static int __init slab_sysfs_init(void)
 	struct kmem_cache *s;
 	int err;
 
+	down_write(&slub_lock);
+
 	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
 	if (!slab_kset) {
+		up_write(&slub_lock);
 		printk(KERN_ERR "Cannot register slab subsystem.\n");
 		return -ENOSYS;
 	}
@@ -4574,6 +4573,7 @@ static int __init slab_sysfs_init(void)
 		kfree(al);
 	}
 
+	up_write(&slub_lock);
 	resiliency_test();
 	return 0;
 }