Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c | 182
1 file changed, 99 insertions(+), 83 deletions(-)
@@ -247,7 +247,10 @@ static void sysfs_slab_remove(struct kmem_cache *);
 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
 							{ return 0; }
-static inline void sysfs_slab_remove(struct kmem_cache *s) {}
+static inline void sysfs_slab_remove(struct kmem_cache *s)
+{
+	kfree(s);
+}
 #endif
 
 /********************************************************************
@@ -354,22 +357,22 @@ static void print_section(char *text, u8 *addr, unsigned int length)
 			printk(KERN_ERR "%8s 0x%p: ", text, addr + i);
 			newline = 0;
 		}
-		printk(" %02x", addr[i]);
+		printk(KERN_CONT " %02x", addr[i]);
 		offset = i % 16;
 		ascii[offset] = isgraph(addr[i]) ? addr[i] : '.';
 		if (offset == 15) {
-			printk(" %s\n",ascii);
+			printk(KERN_CONT " %s\n", ascii);
 			newline = 1;
 		}
 	}
 	if (!newline) {
 		i %= 16;
 		while (i < 16) {
-			printk("   ");
+			printk(KERN_CONT "   ");
 			ascii[i] = ' ';
 			i++;
 		}
-		printk(" %s\n", ascii);
+		printk(KERN_CONT " %s\n", ascii);
 	}
 }
 
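printk() treats each call as the start of a new log record unless the caller passes the KERN_CONT marker, so the per-byte fragments in the hunk above would otherwise be split onto separate lines and given a default log level. A minimal sketch of the continuation pattern, independent of this patch (the dump_row() helper name is illustrative only):

#include <linux/kernel.h>

/* Emit one row of bytes as a single log line. */
static void dump_row(const u8 *p, int n)
{
	int i;

	printk(KERN_ERR "row at 0x%p:", p);	/* opens the record, sets the level */
	for (i = 0; i < n; i++)
		printk(KERN_CONT " %02x", p[i]);	/* append to the same record */
	printk(KERN_CONT "\n");			/* terminate the record */
}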
@@ -529,7 +532,7 @@ static void init_object(struct kmem_cache *s, void *object, int active)
 
 	if (s->flags & __OBJECT_POISON) {
 		memset(p, POISON_FREE, s->objsize - 1);
-		p[s->objsize -1] = POISON_END;
+		p[s->objsize - 1] = POISON_END;
 	}
 
 	if (s->flags & SLAB_RED_ZONE)
@@ -558,7 +561,7 @@ static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
 
 static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
 			u8 *object, char *what,
-			u8* start, unsigned int value, unsigned int bytes)
+			u8 *start, unsigned int value, unsigned int bytes)
 {
 	u8 *fault;
 	u8 *end;
@@ -692,7 +695,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
 			(!check_bytes_and_report(s, page, p, "Poison", p,
 					POISON_FREE, s->objsize - 1) ||
 			 !check_bytes_and_report(s, page, p, "Poison",
-				p + s->objsize -1, POISON_END, 1)))
+				p + s->objsize - 1, POISON_END, 1)))
 			return 0;
 		/*
 		 * check_pad_bytes cleans up on its own.
@@ -900,8 +903,7 @@ static int free_debug_processing(struct kmem_cache *s, struct page *page,
 				"SLUB <none>: no slab for object 0x%p.\n",
 						object);
 			dump_stack();
-		}
-		else
+		} else
 			object_err(s, page, object,
 					"page slab pointer corrupt.");
 		goto fail;
@@ -947,7 +949,7 @@ static int __init setup_slub_debug(char *str)
 	/*
 	 * Determine which debug features should be switched on
 	 */
-	for ( ;*str && *str != ','; str++) {
+	for (; *str && *str != ','; str++) {
 		switch (tolower(*str)) {
 		case 'f':
 			slub_debug |= SLAB_DEBUG_FREE;
@@ -966,7 +968,7 @@ static int __init setup_slub_debug(char *str)
 			break;
 		default:
 			printk(KERN_ERR "slub_debug option '%c' "
-				"unknown. skipped\n",*str);
+				"unknown. skipped\n", *str);
 		}
 	}
 
@@ -1039,7 +1041,7 @@ static inline unsigned long kmem_cache_flags(unsigned long objsize,
  */
 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
-	struct page * page;
+	struct page *page;
 	int pages = 1 << s->order;
 
 	if (s->order)
@@ -1135,7 +1137,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	mod_zone_page_state(page_zone(page),
 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-		- pages);
+		-pages);
 
 	__free_pages(page, s->order);
 }
@@ -1195,19 +1197,15 @@ static __always_inline int slab_trylock(struct page *page)
 /*
  * Management of partially allocated slabs
  */
-static void add_partial_tail(struct kmem_cache_node *n, struct page *page)
-{
-	spin_lock(&n->list_lock);
-	n->nr_partial++;
-	list_add_tail(&page->lru, &n->partial);
-	spin_unlock(&n->list_lock);
-}
-
-static void add_partial(struct kmem_cache_node *n, struct page *page)
+static void add_partial(struct kmem_cache_node *n,
+				struct page *page, int tail)
 {
 	spin_lock(&n->list_lock);
 	n->nr_partial++;
-	list_add(&page->lru, &n->partial);
+	if (tail)
+		list_add_tail(&page->lru, &n->partial);
+	else
+		list_add(&page->lru, &n->partial);
 	spin_unlock(&n->list_lock);
 }
 
@@ -1292,7 +1290,8 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 	 * expensive if we do it every time we are trying to find a slab
 	 * with available objects.
 	 */
-	if (!s->defrag_ratio || get_cycles() % 1024 > s->defrag_ratio)
+	if (!s->remote_node_defrag_ratio ||
+			get_cycles() % 1024 > s->remote_node_defrag_ratio)
 		return NULL;
 
 	zonelist = &NODE_DATA(slab_node(current->mempolicy))
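The renamed remote_node_defrag_ratio is stored as ten times the value written through sysfs (see the *_store handler later in this patch), so it ranges from 0 to 1000 and is compared against get_cycles() % 1024 as a cheap pseudo-random throttle on how often remote partial lists are scanned. A hedged sketch of that gate; allow_remote_scan() is an illustrative name, not a function in slub.c:

#include <linux/types.h>
#include <linux/timex.h>	/* get_cycles() */

/*
 * ratio_x10 is the sysfs value multiplied by 10 (0..1000); a low value
 * makes remote partial-list scans rare, a high value allows almost all.
 */
static bool allow_remote_scan(unsigned int ratio_x10)
{
	return ratio_x10 && get_cycles() % 1024 <= ratio_x10;
}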
@@ -1335,7 +1334,7 @@ static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
  *
  * On exit the slab lock will have been dropped.
  */
-static void unfreeze_slab(struct kmem_cache *s, struct page *page)
+static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 {
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
 
@@ -1343,7 +1342,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page)
 	if (page->inuse) {
 
 		if (page->freelist)
-			add_partial(n, page);
+			add_partial(n, page, tail);
 		else if (SlabDebug(page) && (s->flags & SLAB_STORE_USER))
 			add_full(n, page);
 		slab_unlock(page);
@@ -1358,7 +1357,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page)
 		 * partial list stays small. kmem_cache_shrink can
 		 * reclaim empty slabs from the partial list.
 		 */
-		add_partial_tail(n, page);
+		add_partial(n, page, 1);
 		slab_unlock(page);
 	} else {
 		slab_unlock(page);
@@ -1373,6 +1372,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page)
 static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 {
 	struct page *page = c->page;
+	int tail = 1;
 	/*
 	 * Merge cpu freelist into freelist. Typically we get here
 	 * because both freelists are empty. So this is unlikely
@@ -1381,6 +1381,8 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 	while (unlikely(c->freelist)) {
 		void **object;
 
+		tail = 0;	/* Hot objects. Put the slab first */
+
 		/* Retrieve object from cpu_freelist */
 		object = c->freelist;
 		c->freelist = c->freelist[c->offset];
@@ -1391,7 +1393,7 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 		page->inuse--;
 	}
 	c->page = NULL;
-	unfreeze_slab(s, page);
+	unfreeze_slab(s, page, tail);
 }
 
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
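The tail argument threaded through unfreeze_slab() lets deactivate_slab() distinguish cache-hot slabs (ones that still had objects on the per-cpu freelist) from cold ones: hot slabs go to the head of the partial list so they are reused while their cache lines are still warm, cold slabs go to the tail. The same head-versus-tail queueing idiom, sketched with the generic list helpers; the struct and function names below are illustrative, not from slub.c:

#include <linux/list.h>

struct work_item {
	struct list_head lru;
	/* payload ... */
};

/* Queue hot items for immediate reuse, cold ones behind everything else. */
static void queue_item(struct list_head *queue, struct work_item *item, int hot)
{
	if (hot)
		list_add(&item->lru, queue);		/* head: picked next */
	else
		list_add_tail(&item->lru, queue);	/* tail: picked last */
}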
@@ -1539,7 +1541,7 @@ debug:
  *
  * Otherwise we can simply pick the next object from the lockless free list.
  */
-static void __always_inline *slab_alloc(struct kmem_cache *s,
+static __always_inline void *slab_alloc(struct kmem_cache *s,
 		gfp_t gfpflags, int node, void *addr)
 {
 	void **object;
@@ -1613,7 +1615,7 @@ checks_ok:
 	 * then add it.
 	 */
 	if (unlikely(!prior))
-		add_partial_tail(get_node(s, page_to_nid(page)), page);
+		add_partial(get_node(s, page_to_nid(page)), page, 1);
 
 out_unlock:
 	slab_unlock(page);
@@ -1647,7 +1649,7 @@ debug:
  * If fastpath is not possible then fall back to __slab_free where we deal
  * with all sorts of special processing.
  */
-static void __always_inline slab_free(struct kmem_cache *s,
+static __always_inline void slab_free(struct kmem_cache *s,
 			struct page *page, void *x, void *addr)
 {
 	void **object = (void *)x;
@@ -1997,6 +1999,7 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
 {
 	struct page *page;
 	struct kmem_cache_node *n;
+	unsigned long flags;
 
 	BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node));
 
@@ -2021,7 +2024,14 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
 #endif
 	init_kmem_cache_node(n);
 	atomic_long_inc(&n->nr_slabs);
-	add_partial(n, page);
+	/*
+	 * lockdep requires consistent irq usage for each lock
+	 * so even though there cannot be a race this early in
+	 * the boot sequence, we still disable irqs.
+	 */
+	local_irq_save(flags);
+	add_partial(n, page, 0);
+	local_irq_restore(flags);
 	return n;
 }
 
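The new comment refers to lockdep's irq-safety tracking: once a lock has been observed with interrupts disabled (add_partial() is normally reached from allocation and free paths that run with irqs off), taking the same lock with interrupts enabled can trigger a lockdep warning, even when, as here at early boot, nothing could actually contend for it. A minimal sketch of the pattern the hunk follows; the names are illustrative:

#include <linux/spinlock.h>
#include <linux/irqflags.h>

static DEFINE_SPINLOCK(demo_lock);

static void touch_from_early_boot(void)
{
	unsigned long flags;

	/*
	 * Disable irqs even though nothing can race yet, so lockdep sees
	 * a consistent irq state for every acquisition of demo_lock.
	 */
	local_irq_save(flags);
	spin_lock(&demo_lock);
	/* ... manipulate the protected data ... */
	spin_unlock(&demo_lock);
	local_irq_restore(flags);
}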
@@ -2206,7 +2216,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
 
 	s->refcount = 1;
 #ifdef CONFIG_NUMA
-	s->defrag_ratio = 100;
+	s->remote_node_defrag_ratio = 100;
 #endif
 	if (!init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA))
 		goto error;
@@ -2228,7 +2238,7 @@ error:
  */
 int kmem_ptr_validate(struct kmem_cache *s, const void *object)
 {
-	struct page * page;
+	struct page *page;
 
 	page = get_object_page(object);
 
@@ -2322,7 +2332,6 @@ void kmem_cache_destroy(struct kmem_cache *s)
 		if (kmem_cache_close(s))
 			WARN_ON(1);
 		sysfs_slab_remove(s);
-		kfree(s);
 	} else
 		up_write(&slub_lock);
 }
@@ -2341,7 +2350,7 @@ static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT];
 
 static int __init setup_slub_min_order(char *str)
 {
-	get_option (&str, &slub_min_order);
+	get_option(&str, &slub_min_order);
 
 	return 1;
 }
@@ -2350,7 +2359,7 @@ __setup("slub_min_order=", setup_slub_min_order);
 
 static int __init setup_slub_max_order(char *str)
 {
-	get_option (&str, &slub_max_order);
+	get_option(&str, &slub_max_order);
 
 	return 1;
 }
@@ -2359,7 +2368,7 @@ __setup("slub_max_order=", setup_slub_max_order);
 
 static int __init setup_slub_min_objects(char *str)
 {
-	get_option (&str, &slub_min_objects);
+	get_option(&str, &slub_min_objects);
 
 	return 1;
 }
@@ -2605,6 +2614,19 @@ void kfree(const void *x)
 }
 EXPORT_SYMBOL(kfree);
 
+static unsigned long count_partial(struct kmem_cache_node *n)
+{
+	unsigned long flags;
+	unsigned long x = 0;
+	struct page *page;
+
+	spin_lock_irqsave(&n->list_lock, flags);
+	list_for_each_entry(page, &n->partial, lru)
+		x += page->inuse;
+	spin_unlock_irqrestore(&n->list_lock, flags);
+	return x;
+}
+
 /*
  * kmem_cache_shrink removes empty slabs from the partial lists and sorts
  * the remaining slabs by the number of items in use. The slabs with the
@@ -2931,7 +2953,7 @@ static struct kmem_cache *find_mergeable(size_t size,
 		 * Check if alignment is compatible.
 		 * Courtesy of Adrian Drzewiecki
 		 */
-		if ((s->size & ~(align -1)) != s->size)
+		if ((s->size & ~(align - 1)) != s->size)
 			continue;
 
 		if (s->size - size >= sizeof(void *))
@@ -3040,8 +3062,9 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata slab_notifier =
-	{ &slab_cpuup_callback, NULL, 0 };
+static struct notifier_block __cpuinitdata slab_notifier = {
+	&slab_cpuup_callback, NULL, 0
+};
 
 #endif
 
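The hunk above only reflows the positional initializer onto separate lines. As an aside, not part of this patch: the three values fill in notifier_block's notifier_call, next and priority members, so an equivalent form using designated initializers would read as below (the zero-valued members could also simply be omitted):

static struct notifier_block __cpuinitdata slab_notifier = {
	.notifier_call	= &slab_cpuup_callback,
	.next		= NULL,
	.priority	= 0,
};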
@@ -3076,19 +3099,6 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	return slab_alloc(s, gfpflags, node, caller);
 }
 
-static unsigned long count_partial(struct kmem_cache_node *n)
-{
-	unsigned long flags;
-	unsigned long x = 0;
-	struct page *page;
-
-	spin_lock_irqsave(&n->list_lock, flags);
-	list_for_each_entry(page, &n->partial, lru)
-		x += page->inuse;
-	spin_unlock_irqrestore(&n->list_lock, flags);
-	return x;
-}
-
 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
 static int validate_slab(struct kmem_cache *s, struct page *page,
 						unsigned long *map)
@@ -3390,7 +3400,7 @@ static void process_slab(struct loc_track *t, struct kmem_cache *s,
 static int list_locations(struct kmem_cache *s, char *buf,
 					enum track_item alloc)
 {
-	int n = 0;
+	int len = 0;
 	unsigned long i;
 	struct loc_track t = { 0, 0, NULL };
 	int node;
@@ -3421,54 +3431,54 @@ static int list_locations(struct kmem_cache *s, char *buf,
 	for (i = 0; i < t.count; i++) {
 		struct location *l = &t.loc[i];
 
-		if (n > PAGE_SIZE - 100)
+		if (len > PAGE_SIZE - 100)
 			break;
-		n += sprintf(buf + n, "%7ld ", l->count);
+		len += sprintf(buf + len, "%7ld ", l->count);
 
 		if (l->addr)
-			n += sprint_symbol(buf + n, (unsigned long)l->addr);
+			len += sprint_symbol(buf + len, (unsigned long)l->addr);
 		else
-			n += sprintf(buf + n, "<not-available>");
+			len += sprintf(buf + len, "<not-available>");
 
 		if (l->sum_time != l->min_time) {
 			unsigned long remainder;
 
-			n += sprintf(buf + n, " age=%ld/%ld/%ld",
+			len += sprintf(buf + len, " age=%ld/%ld/%ld",
 			l->min_time,
 			div_long_long_rem(l->sum_time, l->count, &remainder),
 			l->max_time);
 		} else
-			n += sprintf(buf + n, " age=%ld",
+			len += sprintf(buf + len, " age=%ld",
 				l->min_time);
 
 		if (l->min_pid != l->max_pid)
-			n += sprintf(buf + n, " pid=%ld-%ld",
+			len += sprintf(buf + len, " pid=%ld-%ld",
 				l->min_pid, l->max_pid);
 		else
-			n += sprintf(buf + n, " pid=%ld",
+			len += sprintf(buf + len, " pid=%ld",
 				l->min_pid);
 
 		if (num_online_cpus() > 1 && !cpus_empty(l->cpus) &&
-				n < PAGE_SIZE - 60) {
-			n += sprintf(buf + n, " cpus=");
-			n += cpulist_scnprintf(buf + n, PAGE_SIZE - n - 50,
+				len < PAGE_SIZE - 60) {
+			len += sprintf(buf + len, " cpus=");
+			len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
 					l->cpus);
 		}
 
 		if (num_online_nodes() > 1 && !nodes_empty(l->nodes) &&
-				n < PAGE_SIZE - 60) {
-			n += sprintf(buf + n, " nodes=");
-			n += nodelist_scnprintf(buf + n, PAGE_SIZE - n - 50,
+				len < PAGE_SIZE - 60) {
+			len += sprintf(buf + len, " nodes=");
+			len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
 					l->nodes);
 		}
 
-		n += sprintf(buf + n, "\n");
+		len += sprintf(buf + len, "\n");
 	}
 
 	free_loc_track(&t);
 	if (!t.count)
-		n += sprintf(buf, "No data\n");
-	return n;
+		len += sprintf(buf, "No data\n");
+	return len;
 }
 
 enum slab_stat_type {
@@ -3498,7 +3508,6 @@ static unsigned long slab_objects(struct kmem_cache *s,
 
 	for_each_possible_cpu(cpu) {
 		struct page *page;
-		int node;
 		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
 
 		if (!c)
@@ -3510,8 +3519,6 @@ static unsigned long slab_objects(struct kmem_cache *s,
 			continue;
 		if (page) {
 			if (flags & SO_CPU) {
-				int x = 0;
-
 				if (flags & SO_OBJECTS)
 					x = page->inuse;
 				else
@@ -3848,24 +3855,24 @@ static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
 SLAB_ATTR_RO(free_calls);
 
 #ifdef CONFIG_NUMA
-static ssize_t defrag_ratio_show(struct kmem_cache *s, char *buf)
+static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%d\n", s->defrag_ratio / 10);
+	return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10);
 }
 
-static ssize_t defrag_ratio_store(struct kmem_cache *s,
+static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
 				const char *buf, size_t length)
 {
 	int n = simple_strtoul(buf, NULL, 10);
 
 	if (n < 100)
-		s->defrag_ratio = n * 10;
+		s->remote_node_defrag_ratio = n * 10;
 	return length;
 }
-SLAB_ATTR(defrag_ratio);
+SLAB_ATTR(remote_node_defrag_ratio);
 #endif
 
-static struct attribute * slab_attrs[] = {
+static struct attribute *slab_attrs[] = {
 	&slab_size_attr.attr,
 	&object_size_attr.attr,
 	&objs_per_slab_attr.attr,
@@ -3893,7 +3900,7 @@ static struct attribute * slab_attrs[] = {
 	&cache_dma_attr.attr,
 #endif
 #ifdef CONFIG_NUMA
-	&defrag_ratio_attr.attr,
+	&remote_node_defrag_ratio_attr.attr,
 #endif
 	NULL
 };
@@ -3940,6 +3947,13 @@ static ssize_t slab_attr_store(struct kobject *kobj,
 	return err;
 }
 
+static void kmem_cache_release(struct kobject *kobj)
+{
+	struct kmem_cache *s = to_slab(kobj);
+
+	kfree(s);
+}
+
 static struct sysfs_ops slab_sysfs_ops = {
 	.show = slab_attr_show,
 	.store = slab_attr_store,
@@ -3947,6 +3961,7 @@ static struct sysfs_ops slab_sysfs_ops = {
 
 static struct kobj_type slab_ktype = {
 	.sysfs_ops = &slab_sysfs_ops,
+	.release = kmem_cache_release
 };
 
 static int uevent_filter(struct kset *kset, struct kobject *kobj)
@@ -4048,6 +4063,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
 {
 	kobject_uevent(&s->kobj, KOBJ_REMOVE);
 	kobject_del(&s->kobj);
+	kobject_put(&s->kobj);
 }
 
 /*
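With .release wired up in slab_ktype, freeing the struct kmem_cache moves from kmem_cache_destroy() (the kfree(s) removed earlier in this patch) into the kobject layer: kobject_put() drops the sysfs reference, and only when the last reference goes away does the kobject core invoke kmem_cache_release(), which does the kfree(). The general shape of that lifecycle, sketched with illustrative names rather than the slub.c ones:

#include <linux/kobject.h>
#include <linux/slab.h>

struct widget {
	struct kobject kobj;
	/* ... */
};

static void widget_release(struct kobject *kobj)
{
	/* Called by the kobject core once the last reference is dropped. */
	kfree(container_of(kobj, struct widget, kobj));
}

static struct kobj_type widget_ktype = {
	.release = widget_release,
};

static void widget_remove(struct widget *w)
{
	kobject_del(&w->kobj);	/* unhook from sysfs */
	kobject_put(&w->kobj);	/* may free w via widget_release() */
}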