Diffstat (limited to 'mm')
 -rw-r--r--  mm/slab.c  12
 -rw-r--r--  mm/slub.c  79
 2 files changed, 47 insertions(+), 44 deletions(-)
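Much of the churn below is one mechanical cleanup: caller-address plumbing in both slab allocators moves from void * to unsigned long, with the open-coded __builtin_return_address(0) calls replaced by _RET_IP_. For reference, _RET_IP_ already exists in include/linux/kernel.h as a typed wrapper around the builtin:

    /* from include/linux/kernel.h */
    #define _RET_IP_		(unsigned long)__builtin_return_address(0)

Alongside that, slub.c gains named constants (OO_SHIFT, OO_MASK, MAX_OBJS_PER_PAGE) for its order/objects packing, and early_kmem_cache_node_alloc() becomes void.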
diff --git a/mm/slab.c b/mm/slab.c
index d4b87690b275..cb2e411d93a9 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2123,6 +2123,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
  *
  * @name must be valid until the cache is destroyed. This implies that
  * the module calling this has to destroy the cache before getting unloaded.
+ * Note that kmem_cache_name() is not guaranteed to return the same pointer,
+ * therefore applications must manage it themselves.
  *
  * The flags are
  *
@@ -2997,7 +2999,7 @@ retry:
 		 * there must be at least one object available for
 		 * allocation.
 		 */
-		BUG_ON(slabp->inuse < 0 || slabp->inuse >= cachep->num);
+		BUG_ON(slabp->inuse >= cachep->num);
 
 		while (slabp->inuse < cachep->num && batchcount--) {
 			STATS_INC_ALLOCED(cachep);
@@ -3686,9 +3688,9 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 EXPORT_SYMBOL(__kmalloc_node);
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
-		int node, void *caller)
+		int node, unsigned long caller)
 {
-	return __do_kmalloc_node(size, flags, node, caller);
+	return __do_kmalloc_node(size, flags, node, (void *)caller);
 }
 EXPORT_SYMBOL(__kmalloc_node_track_caller);
 #else
@@ -3730,9 +3732,9 @@ void *__kmalloc(size_t size, gfp_t flags)
 }
 EXPORT_SYMBOL(__kmalloc);
 
-void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
+void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
 {
-	return __do_kmalloc(size, flags, caller);
+	return __do_kmalloc(size, flags, (void *)caller);
 }
 EXPORT_SYMBOL(__kmalloc_track_caller);
 
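Callers normally reach these *_track_caller() functions through wrapper macros in <linux/slab.h> that capture the call site; after this change those wrappers pass _RET_IP_ rather than a pointer. A minimal sketch of the forwarding macro (the real slab.h definition sits behind config guards):

    /* sketch: how <linux/slab.h> forwards the call site */
    #define kmalloc_track_caller(size, flags) \
    	__kmalloc_track_caller(size, flags, _RET_IP_)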
diff --git a/mm/slub.c b/mm/slub.c
index d057ceb3645f..ca95e45f04c3 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -153,6 +153,10 @@
 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
 #endif
 
+#define OO_SHIFT	16
+#define OO_MASK		((1 << OO_SHIFT) - 1)
+#define MAX_OBJS_PER_PAGE	65535 /* since page.objects is u16 */
+
 /* Internal SLUB flags */
 #define __OBJECT_POISON		0x80000000 /* Poison object */
 #define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */
@@ -178,7 +182,7 @@ static LIST_HEAD(slab_caches);
  * Tracking user of a slab.
  */
 struct track {
-	void *addr;		/* Called from address */
+	unsigned long addr;	/* Called from address */
 	int cpu;		/* Was running on cpu */
 	int pid;		/* Pid context */
 	unsigned long when;	/* When did the operation occur */
@@ -290,7 +294,7 @@ static inline struct kmem_cache_order_objects oo_make(int order,
 						unsigned long size)
 {
 	struct kmem_cache_order_objects x = {
-		(order << 16) + (PAGE_SIZE << order) / size
+		(order << OO_SHIFT) + (PAGE_SIZE << order) / size
 	};
 
 	return x;
@@ -298,12 +302,12 @@ static inline struct kmem_cache_order_objects oo_make(int order,
 
 static inline int oo_order(struct kmem_cache_order_objects x)
 {
-	return x.x >> 16;
+	return x.x >> OO_SHIFT;
 }
 
 static inline int oo_objects(struct kmem_cache_order_objects x)
 {
-	return x.x & ((1 << 16) - 1);
+	return x.x & OO_MASK;
 }
 
 #ifdef CONFIG_SLUB_DEBUG
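To make the packing concrete: kmem_cache_order_objects keeps the page order in the bits above OO_SHIFT and the object count in the low 16 bits. A standalone user-space sketch of the same arithmetic (assuming 4 KiB pages; in the kernel the values come from the cache geometry):

    #include <stdio.h>

    #define OO_SHIFT 16
    #define OO_MASK  ((1UL << OO_SHIFT) - 1)

    int main(void)
    {
    	unsigned long page_size = 4096, order = 1, size = 256;
    	/* same packing as oo_make(): order above, object count below */
    	unsigned long x = (order << OO_SHIFT) + (page_size << order) / size;

    	/* prints "order=1 objects=32": an order-1 slab holds 8192/256 objects */
    	printf("order=%lu objects=%lu\n", x >> OO_SHIFT, x & OO_MASK);
    	return 0;
    }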
@@ -367,7 +371,7 @@ static struct track *get_track(struct kmem_cache *s, void *object,
 }
 
 static void set_track(struct kmem_cache *s, void *object,
-				enum track_item alloc, void *addr)
+			enum track_item alloc, unsigned long addr)
 {
 	struct track *p;
 
@@ -391,8 +395,8 @@ static void init_tracking(struct kmem_cache *s, void *object)
 	if (!(s->flags & SLAB_STORE_USER))
 		return;
 
-	set_track(s, object, TRACK_FREE, NULL);
-	set_track(s, object, TRACK_ALLOC, NULL);
+	set_track(s, object, TRACK_FREE, 0UL);
+	set_track(s, object, TRACK_ALLOC, 0UL);
 }
 
 static void print_track(const char *s, struct track *t)
@@ -401,7 +405,7 @@ static void print_track(const char *s, struct track *t)
 		return;
 
 	printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
-		s, t->addr, jiffies - t->when, t->cpu, t->pid);
+		s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
 }
 
 static void print_tracking(struct kmem_cache *s, void *object)
@@ -692,7 +696,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
 	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
 		object_err(s, page, p, "Freepointer corrupt");
 		/*
-		 * No choice but to zap it and thus loose the remainder
+		 * No choice but to zap it and thus lose the remainder
 		 * of the free objects in this slab. May cause
 		 * another error because the object count is now wrong.
 		 */
@@ -764,8 +768,8 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 	}
 
 	max_objects = (PAGE_SIZE << compound_order(page)) / s->size;
-	if (max_objects > 65535)
-		max_objects = 65535;
+	if (max_objects > MAX_OBJS_PER_PAGE)
+		max_objects = MAX_OBJS_PER_PAGE;
 
 	if (page->objects != max_objects) {
 		slab_err(s, page, "Wrong number of objects. Found %d but "
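The cap exists because SLUB stores per-slab object counts in 16-bit fields of struct page (the new constant's comment says as much), so anything above 65535 simply cannot be represented. A toy layout for illustration only (not the real struct page, which packs these fields into unions):

    struct toy_page {
    	unsigned inuse:16;	/* objects in use */
    	unsigned objects:16;	/* total objects; hence the 65535 ceiling */
    };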
@@ -866,7 +870,7 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
 }
 
 static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
-					void *object, void *addr)
+					void *object, unsigned long addr)
 {
 	if (!check_slab(s, page))
 		goto bad;
@@ -906,7 +910,7 @@ bad:
 }
 
 static int free_debug_processing(struct kmem_cache *s, struct page *page,
-					void *object, void *addr)
+					void *object, unsigned long addr)
 {
 	if (!check_slab(s, page))
 		goto fail;
@@ -1029,10 +1033,10 @@ static inline void setup_object_debug(struct kmem_cache *s,
 			struct page *page, void *object) {}
 
 static inline int alloc_debug_processing(struct kmem_cache *s,
-	struct page *page, void *object, void *addr) { return 0; }
+	struct page *page, void *object, unsigned long addr) { return 0; }
 
 static inline int free_debug_processing(struct kmem_cache *s,
-	struct page *page, void *object, void *addr) { return 0; }
+	struct page *page, void *object, unsigned long addr) { return 0; }
 
 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
 			{ return 1; }
@@ -1499,8 +1503,8 @@ static inline int node_match(struct kmem_cache_cpu *c, int node)
  * we need to allocate a new slab. This is the slowest path since it involves
  * a call to the page allocator and the setup of a new slab.
  */
-static void *__slab_alloc(struct kmem_cache *s,
-		gfp_t gfpflags, int node, void *addr, struct kmem_cache_cpu *c)
+static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+			  unsigned long addr, struct kmem_cache_cpu *c)
 {
 	void **object;
 	struct page *new;
@@ -1584,7 +1588,7 @@ debug:
  * Otherwise we can simply pick the next object from the lockless free list.
  */
 static __always_inline void *slab_alloc(struct kmem_cache *s,
-		gfp_t gfpflags, int node, void *addr)
+		gfp_t gfpflags, int node, unsigned long addr)
 {
 	void **object;
 	struct kmem_cache_cpu *c;
@@ -1614,14 +1618,14 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
+	return slab_alloc(s, gfpflags, -1, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
+	return slab_alloc(s, gfpflags, node, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
@@ -1635,7 +1639,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
  * handling required then we can return immediately.
  */
 static void __slab_free(struct kmem_cache *s, struct page *page,
-				void *x, void *addr, unsigned int offset)
+			void *x, unsigned long addr, unsigned int offset)
 {
 	void *prior;
 	void **object = (void *)x;
@@ -1705,7 +1709,7 @@ debug:
  * with all sorts of special processing.
  */
 static __always_inline void slab_free(struct kmem_cache *s,
-			struct page *page, void *x, void *addr)
+			struct page *page, void *x, unsigned long addr)
 {
 	void **object = (void *)x;
 	struct kmem_cache_cpu *c;
@@ -1732,11 +1736,11 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 
 	page = virt_to_head_page(x);
 
-	slab_free(s, page, x, __builtin_return_address(0));
+	slab_free(s, page, x, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
-/* Figure out on which slab object the object resides */
+/* Figure out on which slab page the object resides */
 static struct page *get_object_page(const void *x)
 {
 	struct page *page = virt_to_head_page(x);
@@ -1808,8 +1812,8 @@ static inline int slab_order(int size, int min_objects,
 	int rem;
 	int min_order = slub_min_order;
 
-	if ((PAGE_SIZE << min_order) / size > 65535)
-		return get_order(size * 65535) - 1;
+	if ((PAGE_SIZE << min_order) / size > MAX_OBJS_PER_PAGE)
+		return get_order(size * MAX_OBJS_PER_PAGE) - 1;
 
 	for (order = max(min_order,
 				fls(min_objects * size - 1) - PAGE_SHIFT);
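The early return caps the order so the resulting object count still fits in page->objects. Since get_order() rounds up, get_order(size * MAX_OBJS_PER_PAGE) may pick an order holding just over 65535 objects; backing off by one guarantees the count stays below the limit. A standalone check of that arithmetic (with an assumed 4 KiB page size, hypothetical inputs, and a simplified get_order()):

    #include <stdio.h>

    #define PAGE_SIZE		4096UL
    #define MAX_OBJS_PER_PAGE	65535UL

    /* simplified get_order(): smallest order whose pages cover 'bytes' */
    static int get_order_sketch(unsigned long bytes)
    {
    	int order = 0;

    	while ((PAGE_SIZE << order) < bytes)
    		order++;
    	return order;
    }

    int main(void)
    {
    	unsigned long size = 8, min_order = 8;	/* hypothetical inputs */

    	if ((PAGE_SIZE << min_order) / size > MAX_OBJS_PER_PAGE) {
    		int order = get_order_sketch(size * MAX_OBJS_PER_PAGE) - 1;

    		/* prints "capped order=6 objects=32768", safely under 65535 */
    		printf("capped order=%d objects=%lu\n",
    		       order, (PAGE_SIZE << order) / size);
    	}
    	return 0;
    }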
@@ -2074,8 +2078,7 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
  * when allocating for the kmalloc_node_cache. This is used for bootstrapping
  * memory on a fresh node that has no slab structures yet.
  */
-static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
-							   int node)
+static void early_kmem_cache_node_alloc(gfp_t gfpflags, int node)
 {
 	struct page *page;
 	struct kmem_cache_node *n;
@@ -2113,7 +2116,6 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
 	local_irq_save(flags);
 	add_partial(n, page, 0);
 	local_irq_restore(flags);
-	return n;
 }
 
 static void free_kmem_cache_nodes(struct kmem_cache *s)
@@ -2145,8 +2147,7 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 			n = &s->local_node;
 		else {
 			if (slab_state == DOWN) {
-				n = early_kmem_cache_node_alloc(gfpflags,
-								node);
+				early_kmem_cache_node_alloc(gfpflags, node);
 				continue;
 			}
 			n = kmem_cache_alloc_node(kmalloc_caches,
@@ -2660,7 +2661,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, -1, __builtin_return_address(0));
+	return slab_alloc(s, flags, -1, _RET_IP_);
 }
 EXPORT_SYMBOL(__kmalloc);
 
@@ -2688,7 +2689,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, node, __builtin_return_address(0));
+	return slab_alloc(s, flags, node, _RET_IP_);
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
@@ -2745,7 +2746,7 @@ void kfree(const void *x)
 		put_page(page);
 		return;
 	}
-	slab_free(page->slab, page, object, __builtin_return_address(0));
+	slab_free(page->slab, page, object, _RET_IP_);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -3212,7 +3213,7 @@ static struct notifier_block __cpuinitdata slab_notifier = {
 
 #endif
 
-void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
+void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 {
 	struct kmem_cache *s;
 
@@ -3228,7 +3229,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 }
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
-					int node, void *caller)
+					int node, unsigned long caller)
 {
 	struct kmem_cache *s;
 
@@ -3439,7 +3440,7 @@ static void resiliency_test(void) {};
 
 struct location {
 	unsigned long count;
-	void *addr;
+	unsigned long addr;
 	long long sum_time;
 	long min_time;
 	long max_time;
@@ -3487,7 +3488,7 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 {
 	long start, end, pos;
 	struct location *l;
-	void *caddr;
+	unsigned long caddr;
 	unsigned long age = jiffies - track->when;
 
 	start = -1;
@@ -4355,7 +4356,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
 
 /*
  * Need to buffer aliases during bootup until sysfs becomes
- * available lest we loose that information.
+ * available lest we lose that information.
  */
 struct saved_alias {
 	struct kmem_cache *s;