Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	126
1 file changed, 102 insertions, 24 deletions
diff --git a/mm/slub.c b/mm/slub.c
index dcbfda0b02ed..4c6449310a0e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -18,6 +18,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/kmemtrace.h>
+#include <linux/kmemcheck.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
 #include <linux/kmemleak.h>
@@ -147,7 +148,7 @@
 		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE)
 
 #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
-		SLAB_CACHE_DMA)
+		SLAB_CACHE_DMA | SLAB_NOTRACK)
 
 #ifndef ARCH_KMALLOC_MINALIGN
 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
@@ -839,6 +840,11 @@ static inline unsigned long slabs_node(struct kmem_cache *s, int node)
 	return atomic_long_read(&n->nr_slabs);
 }
 
+static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
+{
+	return atomic_long_read(&n->nr_slabs);
+}
+
 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
 {
 	struct kmem_cache_node *n = get_node(s, node);
@@ -1057,6 +1063,8 @@ static inline unsigned long kmem_cache_flags(unsigned long objsize,
 
 static inline unsigned long slabs_node(struct kmem_cache *s, int node)
 							{ return 0; }
+static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
+							{ return 0; }
 static inline void inc_slabs_node(struct kmem_cache *s, int node,
 							int objects) {}
 static inline void dec_slabs_node(struct kmem_cache *s, int node,
@@ -1071,6 +1079,8 @@ static inline struct page *alloc_slab_page(gfp_t flags, int node,
 {
 	int order = oo_order(oo);
 
+	flags |= __GFP_NOTRACK;
+
 	if (node == -1)
 		return alloc_pages(flags, order);
 	else
@@ -1098,6 +1108,24 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 
 		stat(get_cpu_slab(s, raw_smp_processor_id()), ORDER_FALLBACK);
 	}
+
+	if (kmemcheck_enabled
+		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS)))
+	{
+		int pages = 1 << oo_order(oo);
+
+		kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
+
+		/*
+		 * Objects from caches that have a constructor don't get
+		 * cleared when they're allocated, so we need to do it here.
+		 */
+		if (s->ctor)
+			kmemcheck_mark_uninitialized_pages(page, pages);
+		else
+			kmemcheck_mark_unallocated_pages(page, pages);
+	}
+
 	page->objects = oo_objects(oo);
 	mod_zone_page_state(page_zone(page),
 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
@@ -1171,6 +1199,8 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 		__ClearPageSlubDebug(page);
 	}
 
+	kmemcheck_free_shadow(page, compound_order(page));
+
 	mod_zone_page_state(page_zone(page),
 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
@@ -1491,6 +1521,65 @@ static inline int node_match(struct kmem_cache_cpu *c, int node)
 	return 1;
 }
 
+static int count_free(struct page *page)
+{
+	return page->objects - page->inuse;
+}
+
+static unsigned long count_partial(struct kmem_cache_node *n,
+					int (*get_count)(struct page *))
+{
+	unsigned long flags;
+	unsigned long x = 0;
+	struct page *page;
+
+	spin_lock_irqsave(&n->list_lock, flags);
+	list_for_each_entry(page, &n->partial, lru)
+		x += get_count(page);
+	spin_unlock_irqrestore(&n->list_lock, flags);
+	return x;
+}
+
+static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
+{
+#ifdef CONFIG_SLUB_DEBUG
+	return atomic_long_read(&n->total_objects);
+#else
+	return 0;
+#endif
+}
+
+static noinline void
+slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
+{
+	int node;
+
+	printk(KERN_WARNING
+		"SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n",
+		nid, gfpflags);
+	printk(KERN_WARNING "  cache: %s, object size: %d, buffer size: %d, "
+		"default order: %d, min order: %d\n", s->name, s->objsize,
+		s->size, oo_order(s->oo), oo_order(s->min));
+
+	for_each_online_node(node) {
+		struct kmem_cache_node *n = get_node(s, node);
+		unsigned long nr_slabs;
+		unsigned long nr_objs;
+		unsigned long nr_free;
+
+		if (!n)
+			continue;
+
+		nr_free  = count_partial(n, count_free);
+		nr_slabs = node_nr_slabs(n);
+		nr_objs  = node_nr_objs(n);
+
+		printk(KERN_WARNING
+			"  node %d: slabs: %ld, objs: %ld, free: %ld\n",
+			node, nr_slabs, nr_objs, nr_free);
+	}
+}
+
 /*
  * Slow path. The lockless freelist is empty or we need to perform
  * debugging duties.
@@ -1572,6 +1661,8 @@ new_slab:
 		c->page = new;
 		goto load_freelist;
 	}
+	if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
+		slab_out_of_memory(s, gfpflags, node);
 	return NULL;
 debug:
 	if (!alloc_debug_processing(s, c->page, object, addr))
@@ -1626,7 +1717,9 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	if (unlikely((gfpflags & __GFP_ZERO) && object))
 		memset(object, 0, objsize);
 
+	kmemcheck_slab_alloc(s, gfpflags, object, c->objsize);
 	kmemleak_alloc_recursive(object, objsize, 1, s->flags, gfpflags);
+
 	return object;
 }
 
@@ -1759,6 +1852,7 @@ static __always_inline void slab_free(struct kmem_cache *s,
 	kmemleak_free_recursive(x, s->flags);
 	local_irq_save(flags);
 	c = get_cpu_slab(s, smp_processor_id());
+	kmemcheck_slab_free(s, object, c->objsize);
 	debug_check_no_locks_freed(object, c->objsize);
 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
 		debug_check_no_obj_freed(object, c->objsize);
@@ -2638,7 +2732,7 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
 	 * need to do anything because our sysfs initcall will start by
 	 * adding all existing slabs to sysfs.
 	 */
-	slabflags = SLAB_CACHE_DMA;
+	slabflags = SLAB_CACHE_DMA|SLAB_NOTRACK;
 	if (slab_state >= SYSFS)
 		slabflags |= __SYSFS_ADD_DEFERRED;
 
@@ -2738,9 +2832,10 @@ EXPORT_SYMBOL(__kmalloc);
 
 static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
-	struct page *page = alloc_pages_node(node, flags | __GFP_COMP,
-						get_order(size));
+	struct page *page;
 
+	flags |= __GFP_COMP | __GFP_NOTRACK;
+	page = alloc_pages_node(node, flags, get_order(size));
 	if (page)
 		return page_address(page);
 	else
@@ -3351,20 +3446,6 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 }
 
 #ifdef CONFIG_SLUB_DEBUG
-static unsigned long count_partial(struct kmem_cache_node *n,
-					int (*get_count)(struct page *))
-{
-	unsigned long flags;
-	unsigned long x = 0;
-	struct page *page;
-
-	spin_lock_irqsave(&n->list_lock, flags);
-	list_for_each_entry(page, &n->partial, lru)
-		x += get_count(page);
-	spin_unlock_irqrestore(&n->list_lock, flags);
-	return x;
-}
-
 static int count_inuse(struct page *page)
 {
 	return page->inuse;
@@ -3375,11 +3456,6 @@ static int count_total(struct page *page)
 	return page->objects;
 }
 
-static int count_free(struct page *page)
-{
-	return page->objects - page->inuse;
-}
-
 static int validate_slab(struct kmem_cache *s, struct page *page,
 						unsigned long *map)
 {
@@ -3748,7 +3824,7 @@ static int list_locations(struct kmem_cache *s, char *buf,
 				to_cpumask(l->cpus));
 		}
 
-		if (num_online_nodes() > 1 && !nodes_empty(l->nodes) &&
+		if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
 				len < PAGE_SIZE - 60) {
 			len += sprintf(buf + len, " nodes=");
 			len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
@@ -4423,6 +4499,8 @@ static char *create_unique_id(struct kmem_cache *s)
 		*p++ = 'a';
 	if (s->flags & SLAB_DEBUG_FREE)
 		*p++ = 'F';
+	if (!(s->flags & SLAB_NOTRACK))
+		*p++ = 't';
 	if (p != name + 1)
 		*p++ = '-';
 	p += sprintf(p, "%07d", s->size);