Diffstat (limited to 'mm/slub.c')
-rw-r--r-- | mm/slub.c | 101 |
1 file changed, 57 insertions, 44 deletions
@@ -153,6 +153,10 @@
 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
 #endif
 
+#define OO_SHIFT 16
+#define OO_MASK ((1 << OO_SHIFT) - 1)
+#define MAX_OBJS_PER_PAGE 65535 /* since page.objects is u16 */
+
 /* Internal SLUB flags */
 #define __OBJECT_POISON 0x80000000 /* Poison object */
 #define __SYSFS_ADD_DEFERRED 0x40000000 /* Not yet visible via sysfs */
@@ -178,7 +182,7 @@ static LIST_HEAD(slab_caches);
  * Tracking user of a slab.
  */
 struct track {
-        void *addr; /* Called from address */
+        unsigned long addr; /* Called from address */
         int cpu; /* Was running on cpu */
         int pid; /* Pid context */
         unsigned long when; /* When did the operation occur */
@@ -290,7 +294,7 @@ static inline struct kmem_cache_order_objects oo_make(int order,
                 unsigned long size)
 {
         struct kmem_cache_order_objects x = {
-                (order << 16) + (PAGE_SIZE << order) / size
+                (order << OO_SHIFT) + (PAGE_SIZE << order) / size
         };
 
         return x;
@@ -298,12 +302,12 @@ static inline struct kmem_cache_order_objects oo_make(int order,
 
 static inline int oo_order(struct kmem_cache_order_objects x)
 {
-        return x.x >> 16;
+        return x.x >> OO_SHIFT;
 }
 
 static inline int oo_objects(struct kmem_cache_order_objects x)
 {
-        return x.x & ((1 << 16) - 1);
+        return x.x & OO_MASK;
 }
 
 #ifdef CONFIG_SLUB_DEBUG
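The oo_* helpers above pack two values into the single word of struct kmem_cache_order_objects: the page order sits in the bits above OO_SHIFT and the number of objects that fit in a slab of that order sits in the low OO_SHIFT bits; the new macros simply name the shift and mask that were previously open-coded as 16 and ((1 << 16) - 1). A standalone userspace sketch of the encoding (the mock struct, PAGE_SIZE value, and function names are assumptions for illustration, not the kernel definitions):

#include <assert.h>
#include <stdio.h>

#define OO_SHIFT 16
#define OO_MASK ((1UL << OO_SHIFT) - 1)
#define PAGE_SIZE 4096UL

/* Mock of struct kmem_cache_order_objects: one word holding order and object count. */
struct order_objects { unsigned long x; };

static struct order_objects oo_make(int order, unsigned long size)
{
        struct order_objects x = {
                ((unsigned long)order << OO_SHIFT) + (PAGE_SIZE << order) / size
        };
        return x;
}

static int oo_order(struct order_objects x)   { return x.x >> OO_SHIFT; }
static int oo_objects(struct order_objects x) { return x.x & OO_MASK; }

int main(void)
{
        /* An order-1 slab (8 KiB) of 256-byte objects holds 32 of them. */
        struct order_objects oo = oo_make(1, 256);
        assert(oo_order(oo) == 1 && oo_objects(oo) == 32);
        printf("order=%d objects=%d\n", oo_order(oo), oo_objects(oo));
        return 0;
}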
@@ -367,7 +371,7 @@ static struct track *get_track(struct kmem_cache *s, void *object,
 }
 
 static void set_track(struct kmem_cache *s, void *object,
-                enum track_item alloc, void *addr)
+                enum track_item alloc, unsigned long addr)
 {
         struct track *p;
 
@@ -391,8 +395,8 @@ static void init_tracking(struct kmem_cache *s, void *object)
         if (!(s->flags & SLAB_STORE_USER))
                 return;
 
-        set_track(s, object, TRACK_FREE, NULL);
-        set_track(s, object, TRACK_ALLOC, NULL);
+        set_track(s, object, TRACK_FREE, 0UL);
+        set_track(s, object, TRACK_ALLOC, 0UL);
 }
 
 static void print_track(const char *s, struct track *t)
@@ -401,7 +405,7 @@ static void print_track(const char *s, struct track *t)
                 return;
 
         printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
-                s, t->addr, jiffies - t->when, t->cpu, t->pid);
+                s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
 }
 
 static void print_tracking(struct kmem_cache *s, void *object)
@@ -692,7 +696,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
         if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
                 object_err(s, page, p, "Freepointer corrupt");
                 /*
-                 * No choice but to zap it and thus loose the remainder
+                 * No choice but to zap it and thus lose the remainder
                  * of the free objects in this slab. May cause
                  * another error because the object count is now wrong.
                  */
@@ -764,8 +768,8 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
         }
 
         max_objects = (PAGE_SIZE << compound_order(page)) / s->size;
-        if (max_objects > 65535)
-                max_objects = 65535;
+        if (max_objects > MAX_OBJS_PER_PAGE)
+                max_objects = MAX_OBJS_PER_PAGE;
 
         if (page->objects != max_objects) {
                 slab_err(s, page, "Wrong number of objects. Found %d but "
@@ -866,7 +870,7 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
 }
 
 static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
-                                void *object, void *addr)
+                                void *object, unsigned long addr)
 {
         if (!check_slab(s, page))
                 goto bad;
@@ -906,7 +910,7 @@ bad:
 }
 
 static int free_debug_processing(struct kmem_cache *s, struct page *page,
-                                void *object, void *addr)
+                                void *object, unsigned long addr)
 {
         if (!check_slab(s, page))
                 goto fail;
@@ -1029,10 +1033,10 @@ static inline void setup_object_debug(struct kmem_cache *s,
                         struct page *page, void *object) {}
 
 static inline int alloc_debug_processing(struct kmem_cache *s,
-        struct page *page, void *object, void *addr) { return 0; }
+        struct page *page, void *object, unsigned long addr) { return 0; }
 
 static inline int free_debug_processing(struct kmem_cache *s,
-        struct page *page, void *object, void *addr) { return 0; }
+        struct page *page, void *object, unsigned long addr) { return 0; }
 
 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
                         { return 1; }
@@ -1499,8 +1503,8 @@ static inline int node_match(struct kmem_cache_cpu *c, int node)
  * we need to allocate a new slab. This is the slowest path since it involves
  * a call to the page allocator and the setup of a new slab.
  */
-static void *__slab_alloc(struct kmem_cache *s,
-                gfp_t gfpflags, int node, void *addr, struct kmem_cache_cpu *c)
+static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+                unsigned long addr, struct kmem_cache_cpu *c)
 {
         void **object;
         struct page *new;
@@ -1584,13 +1588,14 @@ debug:
  * Otherwise we can simply pick the next object from the lockless free list.
  */
 static __always_inline void *slab_alloc(struct kmem_cache *s,
-                gfp_t gfpflags, int node, void *addr)
+                gfp_t gfpflags, int node, unsigned long addr)
 {
         void **object;
         struct kmem_cache_cpu *c;
         unsigned long flags;
         unsigned int objsize;
 
+        might_sleep_if(gfpflags & __GFP_WAIT);
         local_irq_save(flags);
         c = get_cpu_slab(s, smp_processor_id());
         objsize = c->objsize;
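Besides the addr type change, the functional addition in this hunk is might_sleep_if(gfpflags & __GFP_WAIT): when the flags permit blocking, the fast path now asserts up front that the caller is in a context where sleeping is legal, so a GFP_KERNEL allocation from atomic context is reported even if this particular call would not actually have slept. A rough userspace analogue of the idea (the flag value and the atomic-context marker are illustrative assumptions, not kernel API):

#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>

#define MY_GFP_WAIT 0x10u          /* illustrative "this allocation may block" flag */

static bool in_atomic_context;     /* illustrative stand-in for preempt/irq state */

static void might_sleep_if(bool cond)
{
        /* Complain as soon as a sleepable operation is requested from atomic
         * context, even if this particular call would not have slept. */
        if (cond)
                assert(!in_atomic_context && "sleeping allocation in atomic context");
}

static void *my_alloc(size_t size, unsigned int flags)
{
        might_sleep_if(flags & MY_GFP_WAIT);
        return malloc(size);
}

int main(void)
{
        void *p = my_alloc(64, MY_GFP_WAIT);    /* fine: process context */
        free(p);

        in_atomic_context = true;
        p = my_alloc(64, 0);                    /* fine: caller promised not to block */
        free(p);
        return 0;
}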
@@ -1613,14 +1618,14 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-        return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
+        return slab_alloc(s, gfpflags, -1, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-        return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
+        return slab_alloc(s, gfpflags, node, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
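_RET_IP_ is defined in linux/kernel.h as (unsigned long)__builtin_return_address(0), so these call sites record the same caller address as before; the value now just travels as an unsigned long, matching the reworked struct track and the slab_alloc()/slab_free() signatures. A small sketch of caller tracking in that style (userspace, GCC/Clang builtin; the wrapper names are made up for illustration):

#include <stdio.h>
#include <stdlib.h>

#define RET_IP ((unsigned long)__builtin_return_address(0))

/* Record where an allocation came from, as a plain integer address. */
static void *alloc_tracked(size_t size, unsigned long caller)
{
        printf("alloc %zu bytes, called from %#lx\n", size, caller);
        return malloc(size);
}

static void *my_malloc(size_t size)
{
        /* The macro expands here, so it captures the address my_malloc()
         * will return to, i.e. the caller's call site. */
        return alloc_tracked(size, RET_IP);
}

int main(void)
{
        void *p = my_malloc(128);
        free(p);
        return 0;
}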
@@ -1634,7 +1639,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
  * handling required then we can return immediately.
  */
 static void __slab_free(struct kmem_cache *s, struct page *page,
-                void *x, void *addr, unsigned int offset)
+                void *x, unsigned long addr, unsigned int offset)
 {
         void *prior;
         void **object = (void *)x;
@@ -1704,7 +1709,7 @@ debug:
  * with all sorts of special processing.
  */
 static __always_inline void slab_free(struct kmem_cache *s,
-                struct page *page, void *x, void *addr)
+                struct page *page, void *x, unsigned long addr)
 {
         void **object = (void *)x;
         struct kmem_cache_cpu *c;
@@ -1731,11 +1736,11 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 
         page = virt_to_head_page(x);
 
-        slab_free(s, page, x, __builtin_return_address(0));
+        slab_free(s, page, x, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
-/* Figure out on which slab object the object resides */
+/* Figure out on which slab page the object resides */
 static struct page *get_object_page(const void *x)
 {
         struct page *page = virt_to_head_page(x);
@@ -1807,8 +1812,8 @@ static inline int slab_order(int size, int min_objects,
         int rem;
         int min_order = slub_min_order;
 
-        if ((PAGE_SIZE << min_order) / size > 65535)
-                return get_order(size * 65535) - 1;
+        if ((PAGE_SIZE << min_order) / size > MAX_OBJS_PER_PAGE)
+                return get_order(size * MAX_OBJS_PER_PAGE) - 1;
 
         for (order = max(min_order,
                         fls(min_objects * size - 1) - PAGE_SHIFT);
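The clamp exists because page->objects is a 16-bit field: a slab must never be described as holding more than MAX_OBJS_PER_PAGE objects, so when even the minimum order would exceed that, slab_order() returns the largest order whose object count still fits. With 4 KiB pages this only matters for tiny objects; 8-byte objects, for instance, would need an order-7 (512 KiB) slab before (PAGE_SIZE << order) / size reaches 65536. A quick userspace check of that arithmetic (get_order() here is a simplified stand-in for the kernel helper):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define MAX_OBJS_PER_PAGE 65535UL  /* page.objects is a u16 */

/* Simplified stand-in for get_order(): smallest order whose pages cover size. */
static int get_order(unsigned long size)
{
        int order = 0;

        while ((PAGE_SIZE << order) < size)
                order++;
        return order;
}

int main(void)
{
        unsigned long size = 8;    /* illustrative tiny object */
        int order = get_order(size * MAX_OBJS_PER_PAGE) - 1;

        /* Prints order 6, which holds 32768 objects: comfortably within a u16. */
        printf("capped order %d holds %lu objects\n",
               order, (PAGE_SIZE << order) / size);
        return 0;
}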
@@ -2073,8 +2078,7 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
  * when allocating for the kmalloc_node_cache. This is used for bootstrapping
  * memory on a fresh node that has no slab structures yet.
  */
-static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
-                                                        int node)
+static void early_kmem_cache_node_alloc(gfp_t gfpflags, int node)
 {
         struct page *page;
         struct kmem_cache_node *n;
@@ -2112,7 +2116,6 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
         local_irq_save(flags);
         add_partial(n, page, 0);
         local_irq_restore(flags);
-        return n;
 }
 
 static void free_kmem_cache_nodes(struct kmem_cache *s)
@@ -2144,8 +2147,7 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
                         n = &s->local_node;
                 else {
                         if (slab_state == DOWN) {
-                                n = early_kmem_cache_node_alloc(gfpflags,
-                                                                node);
+                                early_kmem_cache_node_alloc(gfpflags, node);
                                 continue;
                         }
                         n = kmem_cache_alloc_node(kmalloc_caches,
@@ -2659,7 +2661,7 @@ void *__kmalloc(size_t size, gfp_t flags)
         if (unlikely(ZERO_OR_NULL_PTR(s)))
                 return s;
 
-        return slab_alloc(s, flags, -1, __builtin_return_address(0));
+        return slab_alloc(s, flags, -1, _RET_IP_);
 }
 EXPORT_SYMBOL(__kmalloc);
 
@@ -2687,7 +2689,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
         if (unlikely(ZERO_OR_NULL_PTR(s)))
                 return s;
 
-        return slab_alloc(s, flags, node, __builtin_return_address(0));
+        return slab_alloc(s, flags, node, _RET_IP_);
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
@@ -2744,7 +2746,7 @@ void kfree(const void *x)
                 put_page(page);
                 return;
         }
-        slab_free(page->slab, page, object, __builtin_return_address(0));
+        slab_free(page->slab, page, object, _RET_IP_);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -2931,8 +2933,10 @@ static int slab_memory_callback(struct notifier_block *self,
         case MEM_CANCEL_OFFLINE:
                 break;
         }
-
+        if (ret)
         ret = notifier_from_errno(ret);
+        else
+                ret = NOTIFY_OK;
         return ret;
 }
 
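slab_memory_callback() used to push every result through notifier_from_errno(), success included; the new code converts only real errors and reports NOTIFY_OK explicitly, since at this point in the kernel's history notifier_from_errno() apparently did not special-case a zero errno and could turn success into a stop condition. The intended mapping, as a standalone sketch (the NOTIFY_* values and the conversion formula are illustrative, not the kernel's definitions):

#include <stdio.h>

/* Illustrative placeholder values, not the kernel's notifier constants. */
#define NOTIFY_OK        0x0001
#define NOTIFY_STOP_MASK 0x8000

/* Sketch of an errno -> notifier conversion that is only valid for real errors. */
static int notifier_from_errno_sketch(int err)
{
        return NOTIFY_STOP_MASK | (NOTIFY_OK - err);
}

/* The pattern from the hunk above: convert failures, report success as-is. */
static int callback_result(int err)
{
        return err ? notifier_from_errno_sketch(err) : NOTIFY_OK;
}

int main(void)
{
        printf("success -> %#x\n", callback_result(0));    /* NOTIFY_OK */
        printf("-ENOMEM -> %#x\n", callback_result(-12));  /* stop + encoded errno */
        return 0;
}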
@@ -3121,8 +3125,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
                 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
                 up_write(&slub_lock);
 
-                if (sysfs_slab_alias(s, name))
+                if (sysfs_slab_alias(s, name)) {
+                        down_write(&slub_lock);
+                        s->refcount--;
+                        up_write(&slub_lock);
                         goto err;
+                }
                 return s;
         }
 
@@ -3132,8 +3140,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
                                 size, align, flags, ctor)) {
                         list_add(&s->list, &slab_caches);
                         up_write(&slub_lock);
-                        if (sysfs_slab_add(s))
+                        if (sysfs_slab_add(s)) {
+                                down_write(&slub_lock);
+                                list_del(&s->list);
+                                up_write(&slub_lock);
+                                kfree(s);
                                 goto err;
+                        }
                         return s;
                 }
                 kfree(s);
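Both error paths in kmem_cache_create() previously leaked state when sysfs registration failed: an alias kept the extra refcount taken on the merged cache, and a freshly created cache stayed on slab_caches and was never freed. The added blocks re-take slub_lock just long enough to undo that work before jumping to err. The general shape of the pattern (stand-in names, not the kernel's interfaces):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in names for illustration; not the kernel's slub interfaces. */
struct cache { int refcount; bool on_list; };

static int sysfs_register(struct cache *c) { (void)c; return -1; /* pretend it failed */ }
static void lock(void)   { /* stand-in for down_write(&slub_lock) */ }
static void unlock(void) { /* stand-in for up_write(&slub_lock) */ }

static struct cache *create_cache(void)
{
        struct cache *c = calloc(1, sizeof(*c));

        lock();
        c->refcount = 1;
        c->on_list = true;           /* list_add(&s->list, &slab_caches) */
        unlock();

        if (sysfs_register(c)) {
                /* Roll back everything done above, under the same lock. */
                lock();
                c->on_list = false;  /* list_del(&s->list) */
                unlock();
                free(c);             /* kfree(s) */
                return NULL;
        }
        return c;
}

int main(void)
{
        printf("%s\n", create_cache() ? "registered" : "rolled back");
        return 0;
}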
@@ -3200,7 +3213,7 @@ static struct notifier_block __cpuinitdata slab_notifier = {
 
 #endif
 
-void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
+void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 {
         struct kmem_cache *s;
 
@@ -3216,7 +3229,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 }
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
-                                        int node, void *caller)
+                                        int node, unsigned long caller)
 {
         struct kmem_cache *s;
 
@@ -3427,7 +3440,7 @@ static void resiliency_test(void) {};
 
 struct location {
         unsigned long count;
-        void *addr;
+        unsigned long addr;
         long long sum_time;
         long min_time;
         long max_time;
@@ -3475,7 +3488,7 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 {
         long start, end, pos;
         struct location *l;
-        void *caddr;
+        unsigned long caddr;
         unsigned long age = jiffies - track->when;
 
         start = -1;
@@ -3595,7 +3608,7 @@ static int list_locations(struct kmem_cache *s, char *buf,
         for (i = 0; i < t.count; i++) {
                 struct location *l = &t.loc[i];
 
-                if (len > PAGE_SIZE - 100)
+                if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
                         break;
                 len += sprintf(buf + len, "%7ld ", l->count);
 
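list_locations() keeps appending formatted records to a single PAGE_SIZE buffer; reserving KSYM_SYMBOL_LEN of headroom on top of the old 100-byte slack accounts for the longest symbol name that may be printed for l->addr, so a record emitted near the end of the buffer cannot run past it. The headroom idiom in miniature (buffer size, symbol length, and record format are illustrative):

#include <stdio.h>

#define BUF_SIZE 4096              /* stands in for PAGE_SIZE */
#define SYM_MAX   128              /* stands in for KSYM_SYMBOL_LEN */

int main(void)
{
        char buf[BUF_SIZE];
        int len = 0;

        for (int i = 0; i < 1000; i++) {
                /* Stop while there is still room for the largest possible record. */
                if (len > BUF_SIZE - SYM_MAX - 100)
                        break;
                len += sprintf(buf + len, "%7d some_symbol_name+0x%x\n", i, i * 16);
        }
        printf("used %d of %d bytes\n", len, BUF_SIZE);
        return 0;
}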
@@ -4343,7 +4356,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
 
 /*
  * Need to buffer aliases during bootup until sysfs becomes
- * available lest we loose that information.
+ * available lest we lose that information.
  */
 struct saved_alias {
         struct kmem_cache *s;