Diffstat (limited to 'mm')
-rw-r--r--	mm/slab.c	14
-rw-r--r--	mm/slub.c	94
2 files changed, 61 insertions, 47 deletions
diff --git a/mm/slab.c b/mm/slab.c
index c347dd8480cc..f97e564bdf11 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2123,6 +2123,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
  *
  * @name must be valid until the cache is destroyed. This implies that
  * the module calling this has to destroy the cache before getting unloaded.
+ * Note that kmem_cache_name() is not guaranteed to return the same pointer,
+ * therefore applications must manage it themselves.
  *
  * The flags are
  *
@@ -2609,7 +2611,7 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
 	if (OFF_SLAB(cachep)) {
 		/* Slab management obj is off-slab. */
 		slabp = kmem_cache_alloc_node(cachep->slabp_cache,
-					      local_flags & ~GFP_THISNODE, nodeid);
+					      local_flags, nodeid);
 		if (!slabp)
 			return NULL;
 	} else {
@@ -2997,7 +2999,7 @@ retry:
 		 * there must be at least one object available for
 		 * allocation.
 		 */
-		BUG_ON(slabp->inuse < 0 || slabp->inuse >= cachep->num);
+		BUG_ON(slabp->inuse >= cachep->num);
 
 		while (slabp->inuse < cachep->num && batchcount--) {
 			STATS_INC_ALLOCED(cachep);
@@ -3621,9 +3623,9 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 EXPORT_SYMBOL(__kmalloc_node);
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
-		int node, void *caller)
+		int node, unsigned long caller)
 {
-	return __do_kmalloc_node(size, flags, node, caller);
+	return __do_kmalloc_node(size, flags, node, (void *)caller);
 }
 EXPORT_SYMBOL(__kmalloc_node_track_caller);
 #else
@@ -3665,9 +3667,9 @@ void *__kmalloc(size_t size, gfp_t flags)
 }
 EXPORT_SYMBOL(__kmalloc);
 
-void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
+void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
 {
-	return __do_kmalloc(size, flags, caller);
+	return __do_kmalloc(size, flags, (void *)caller);
 }
 EXPORT_SYMBOL(__kmalloc_track_caller);
 
diff --git a/mm/slub.c b/mm/slub.c
index 640fde7e354c..6cb7ad107852 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -154,6 +154,10 @@
 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
 #endif
 
+#define OO_SHIFT	16
+#define OO_MASK		((1 << OO_SHIFT) - 1)
+#define MAX_OBJS_PER_PAGE	65535 /* since page.objects is u16 */
+
 /* Internal SLUB flags */
 #define __OBJECT_POISON		0x80000000 /* Poison object */
 #define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */
@@ -179,7 +183,7 @@ static LIST_HEAD(slab_caches);
  * Tracking user of a slab.
  */
 struct track {
-	void *addr;		/* Called from address */
+	unsigned long addr;	/* Called from address */
 	int cpu;		/* Was running on cpu */
 	int pid;		/* Pid context */
 	unsigned long when;	/* When did the operation occur */
@@ -291,7 +295,7 @@ static inline struct kmem_cache_order_objects oo_make(int order,
 						unsigned long size)
 {
 	struct kmem_cache_order_objects x = {
-		(order << 16) + (PAGE_SIZE << order) / size
+		(order << OO_SHIFT) + (PAGE_SIZE << order) / size
 	};
 
 	return x;
@@ -299,12 +303,12 @@ static inline struct kmem_cache_order_objects oo_make(int order,
 
 static inline int oo_order(struct kmem_cache_order_objects x)
 {
-	return x.x >> 16;
+	return x.x >> OO_SHIFT;
 }
 
 static inline int oo_objects(struct kmem_cache_order_objects x)
 {
-	return x.x & ((1 << 16) - 1);
+	return x.x & OO_MASK;
 }
 
 #ifdef CONFIG_SLUB_DEBUG
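
The two hunks above only swap the magic number 16 for named constants; the encoding itself is unchanged: oo_make() stores the page order in the high bits and the object count in the low OO_SHIFT bits of a single word. A minimal user-space sketch of the scheme (illustration only; oo_pack() is a made-up stand-in for oo_make(), which really operates on struct kmem_cache_order_objects):

#include <assert.h>

#define OO_SHIFT 16
#define OO_MASK  ((1 << OO_SHIFT) - 1)

/* Pack order and object count the way oo_make() does. */
static unsigned long oo_pack(unsigned long order, unsigned long objects)
{
	return (order << OO_SHIFT) + objects;
}

int main(void)
{
	unsigned long x = oo_pack(3, 170);	/* e.g. an order-3 slab of 170 objects */

	assert((x >> OO_SHIFT) == 3);		/* what oo_order() extracts */
	assert((x & OO_MASK) == 170);		/* what oo_objects() extracts */
	return 0;
}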
@@ -368,7 +372,7 @@ static struct track *get_track(struct kmem_cache *s, void *object,
 }
 
 static void set_track(struct kmem_cache *s, void *object,
-			enum track_item alloc, void *addr)
+			enum track_item alloc, unsigned long addr)
 {
 	struct track *p;
 
@@ -392,8 +396,8 @@ static void init_tracking(struct kmem_cache *s, void *object)
 	if (!(s->flags & SLAB_STORE_USER))
 		return;
 
-	set_track(s, object, TRACK_FREE, NULL);
-	set_track(s, object, TRACK_ALLOC, NULL);
+	set_track(s, object, TRACK_FREE, 0UL);
+	set_track(s, object, TRACK_ALLOC, 0UL);
 }
 
 static void print_track(const char *s, struct track *t)
@@ -402,7 +406,7 @@ static void print_track(const char *s, struct track *t)
 		return;
 
 	printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
-		s, t->addr, jiffies - t->when, t->cpu, t->pid);
+		s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
 }
 
 static void print_tracking(struct kmem_cache *s, void *object)
@@ -693,7 +697,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
 	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
 		object_err(s, page, p, "Freepointer corrupt");
 		/*
-		 * No choice but to zap it and thus loose the remainder
+		 * No choice but to zap it and thus lose the remainder
 		 * of the free objects in this slab. May cause
 		 * another error because the object count is now wrong.
 		 */
@@ -765,8 +769,8 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 	}
 
 	max_objects = (PAGE_SIZE << compound_order(page)) / s->size;
-	if (max_objects > 65535)
-		max_objects = 65535;
+	if (max_objects > MAX_OBJS_PER_PAGE)
+		max_objects = MAX_OBJS_PER_PAGE;
 
 	if (page->objects != max_objects) {
 		slab_err(s, page, "Wrong number of objects. Found %d but "
@@ -867,7 +871,7 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
 }
 
 static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
-					void *object, void *addr)
+					void *object, unsigned long addr)
 {
 	if (!check_slab(s, page))
 		goto bad;
@@ -907,7 +911,7 @@ bad:
 }
 
 static int free_debug_processing(struct kmem_cache *s, struct page *page,
-					void *object, void *addr)
+					void *object, unsigned long addr)
 {
 	if (!check_slab(s, page))
 		goto fail;
@@ -1030,10 +1034,10 @@ static inline void setup_object_debug(struct kmem_cache *s,
 			struct page *page, void *object) {}
 
 static inline int alloc_debug_processing(struct kmem_cache *s,
-	struct page *page, void *object, void *addr) { return 0; }
+	struct page *page, void *object, unsigned long addr) { return 0; }
 
 static inline int free_debug_processing(struct kmem_cache *s,
-	struct page *page, void *object, void *addr) { return 0; }
+	struct page *page, void *object, unsigned long addr) { return 0; }
 
 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
 			{ return 1; }
@@ -1500,8 +1504,8 @@ static inline int node_match(struct kmem_cache_cpu *c, int node)
  * we need to allocate a new slab. This is the slowest path since it involves
  * a call to the page allocator and the setup of a new slab.
  */
-static void *__slab_alloc(struct kmem_cache *s,
-		gfp_t gfpflags, int node, void *addr, struct kmem_cache_cpu *c)
+static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+			  unsigned long addr, struct kmem_cache_cpu *c)
 {
 	void **object;
 	struct page *new;
@@ -1585,13 +1589,15 @@ debug:
  * Otherwise we can simply pick the next object from the lockless free list.
  */
 static __always_inline void *slab_alloc(struct kmem_cache *s,
-		gfp_t gfpflags, int node, void *addr)
+		gfp_t gfpflags, int node, unsigned long addr)
 {
 	void **object;
 	struct kmem_cache_cpu *c;
 	unsigned long flags;
 	unsigned int objsize;
 
+	might_sleep_if(gfpflags & __GFP_WAIT);
+
 	if (should_failslab(s->objsize, gfpflags))
 		return NULL;
 
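
The added might_sleep_if() is a debugging aid: an allocation with __GFP_WAIT set is allowed to sleep, but the SLUB fast path never actually does, so a buggy __GFP_WAIT allocation from atomic context went unnoticed whenever a free object happened to sit on the lockless freelist. The macro itself is trivial (from include/linux/kernel.h):

#define might_sleep_if(cond)	do { if (cond) might_sleep(); } while (0)

With this in place the potential sleep is reported when the kernel's sleep-in-atomic debug checks are enabled, regardless of whether the slow path is taken.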
@@ -1617,14 +1623,14 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
+	return slab_alloc(s, gfpflags, -1, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
+	return slab_alloc(s, gfpflags, node, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
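
_RET_IP_ is not a new mechanism, just shorthand: include/linux/kernel.h defines it as

#define _RET_IP_		(unsigned long)__builtin_return_address(0)

so the call sites stay the same length while matching the new unsigned long addr parameters, and the tracking code no longer stores a code address in a data pointer.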
@@ -1638,7 +1644,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
  * handling required then we can return immediately.
  */
 static void __slab_free(struct kmem_cache *s, struct page *page,
-				void *x, void *addr, unsigned int offset)
+				void *x, unsigned long addr, unsigned int offset)
 {
 	void *prior;
 	void **object = (void *)x;
@@ -1708,7 +1714,7 @@ debug:
  * with all sorts of special processing.
  */
 static __always_inline void slab_free(struct kmem_cache *s,
-			struct page *page, void *x, void *addr)
+			struct page *page, void *x, unsigned long addr)
 {
 	void **object = (void *)x;
 	struct kmem_cache_cpu *c;
@@ -1735,11 +1741,11 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 
 	page = virt_to_head_page(x);
 
-	slab_free(s, page, x, __builtin_return_address(0));
+	slab_free(s, page, x, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
-/* Figure out on which slab object the object resides */
+/* Figure out on which slab page the object resides */
 static struct page *get_object_page(const void *x)
 {
 	struct page *page = virt_to_head_page(x);
@@ -1811,8 +1817,8 @@ static inline int slab_order(int size, int min_objects,
 	int rem;
 	int min_order = slub_min_order;
 
-	if ((PAGE_SIZE << min_order) / size > 65535)
-		return get_order(size * 65535) - 1;
+	if ((PAGE_SIZE << min_order) / size > MAX_OBJS_PER_PAGE)
+		return get_order(size * MAX_OBJS_PER_PAGE) - 1;
 
 	for (order = max(min_order,
 				fls(min_objects * size - 1) - PAGE_SHIFT);
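
The clamp above kicks in when a minimum-order slab would hold more objects than the u16 page.objects field can count. A quick user-space check of the arithmetic (a sketch assuming 4 KiB pages and a hypothetical slub_min_order of 7; get_order() here is a stand-in for the kernel helper):

#include <stdio.h>

#define PAGE_SIZE		4096UL
#define MAX_OBJS_PER_PAGE	65535

/* Stand-in for the kernel's get_order(): smallest order whose
 * block of pages is at least size bytes. */
static int get_order(unsigned long size)
{
	int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long size = 8;		/* object size in bytes */
	int min_order = 7;		/* hypothetical slub_min_order */

	/* (4096 << 7) / 8 = 65536 objects: one too many for a u16 ... */
	if ((PAGE_SIZE << min_order) / size > MAX_OBJS_PER_PAGE)
		/* ... so back off: prints 6; an order-6 slab holds 32768 objects */
		printf("clamped to order %d\n",
		       get_order(size * MAX_OBJS_PER_PAGE) - 1);
	return 0;
}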
@@ -2077,8 +2083,7 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
  * when allocating for the kmalloc_node_cache. This is used for bootstrapping
  * memory on a fresh node that has no slab structures yet.
  */
-static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
-							   int node)
+static void early_kmem_cache_node_alloc(gfp_t gfpflags, int node)
 {
 	struct page *page;
 	struct kmem_cache_node *n;
@@ -2116,7 +2121,6 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
 	local_irq_save(flags);
 	add_partial(n, page, 0);
 	local_irq_restore(flags);
-	return n;
 }
 
 static void free_kmem_cache_nodes(struct kmem_cache *s)
@@ -2148,8 +2152,7 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 			n = &s->local_node;
 		else {
 			if (slab_state == DOWN) {
-				n = early_kmem_cache_node_alloc(gfpflags,
-								node);
+				early_kmem_cache_node_alloc(gfpflags, node);
 				continue;
 			}
 			n = kmem_cache_alloc_node(kmalloc_caches,
@@ -2663,7 +2666,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, -1, __builtin_return_address(0));
+	return slab_alloc(s, flags, -1, _RET_IP_);
 }
 EXPORT_SYMBOL(__kmalloc);
 
@@ -2691,7 +2694,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, node, __builtin_return_address(0));
+	return slab_alloc(s, flags, node, _RET_IP_);
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
@@ -2748,7 +2751,7 @@ void kfree(const void *x)
 		put_page(page);
 		return;
 	}
-	slab_free(page->slab, page, object, __builtin_return_address(0));
+	slab_free(page->slab, page, object, _RET_IP_);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -3127,8 +3130,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
 		up_write(&slub_lock);
 
-		if (sysfs_slab_alias(s, name))
+		if (sysfs_slab_alias(s, name)) {
+			down_write(&slub_lock);
+			s->refcount--;
+			up_write(&slub_lock);
 			goto err;
+		}
 		return s;
 	}
 
@@ -3138,8 +3145,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 				size, align, flags, ctor)) {
 		list_add(&s->list, &slab_caches);
 		up_write(&slub_lock);
-		if (sysfs_slab_add(s))
+		if (sysfs_slab_add(s)) {
+			down_write(&slub_lock);
+			list_del(&s->list);
+			up_write(&slub_lock);
+			kfree(s);
 			goto err;
+		}
 		return s;
 	}
 	kfree(s);
@@ -3206,7 +3218,7 @@ static struct notifier_block __cpuinitdata slab_notifier = {
 
 #endif
 
-void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
+void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 {
 	struct kmem_cache *s;
 
@@ -3222,7 +3234,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 }
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
-					int node, void *caller)
+					int node, unsigned long caller)
 {
 	struct kmem_cache *s;
 
@@ -3433,7 +3445,7 @@ static void resiliency_test(void) {};
 
 struct location {
 	unsigned long count;
-	void *addr;
+	unsigned long addr;
 	long long sum_time;
 	long min_time;
 	long max_time;
@@ -3481,7 +3493,7 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 {
 	long start, end, pos;
 	struct location *l;
-	void *caddr;
+	unsigned long caddr;
 	unsigned long age = jiffies - track->when;
 
 	start = -1;
@@ -4349,7 +4361,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
 
 /*
  * Need to buffer aliases during bootup until sysfs becomes
- * available lest we loose that information.
+ * available lest we lose that information.
  */
 struct saved_alias {
 	struct kmem_cache *s;