Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c | 80
1 files changed, 66 insertions, 14 deletions
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -17,9 +17,11 @@
 #include <linux/slab.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
-#include <trace/kmemtrace.h>
+#include <linux/kmemtrace.h>
+#include <linux/kmemcheck.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
+#include <linux/kmemleak.h>
 #include <linux/mempolicy.h>
 #include <linux/ctype.h>
 #include <linux/debugobjects.h>
@@ -143,10 +145,10 @@
  * Set of flags that will prevent slab merging
  */
 #define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
-                SLAB_TRACE | SLAB_DESTROY_BY_RCU)
+                SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE)
 
 #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
-                SLAB_CACHE_DMA)
+                SLAB_CACHE_DMA | SLAB_NOTRACK)
 
 #ifndef ARCH_KMALLOC_MINALIGN
 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
@@ -177,6 +179,12 @@ static enum {
         SYSFS           /* Sysfs up */
 } slab_state = DOWN;
 
+/*
+ * The slab allocator is initialized with interrupts disabled. Therefore, make
+ * sure early boot allocations don't accidentally enable interrupts.
+ */
+static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK;
+
 /* A list of all slab caches on the system */
 static DECLARE_RWSEM(slub_lock);
 static LIST_HEAD(slab_caches);
@@ -1071,6 +1079,8 @@ static inline struct page *alloc_slab_page(gfp_t flags, int node,
 {
         int order = oo_order(oo);
 
+        flags |= __GFP_NOTRACK;
+
         if (node == -1)
                 return alloc_pages(flags, order);
         else
@@ -1098,6 +1108,24 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 
                 stat(get_cpu_slab(s, raw_smp_processor_id()), ORDER_FALLBACK);
         }
+
+        if (kmemcheck_enabled
+                && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS)))
+        {
+                int pages = 1 << oo_order(oo);
+
+                kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
+
+                /*
+                 * Objects from caches that have a constructor don't get
+                 * cleared when they're allocated, so we need to do it here.
+                 */
+                if (s->ctor)
+                        kmemcheck_mark_uninitialized_pages(page, pages);
+                else
+                        kmemcheck_mark_unallocated_pages(page, pages);
+        }
+
         page->objects = oo_objects(oo);
         mod_zone_page_state(page_zone(page),
                 (s->flags & SLAB_RECLAIM_ACCOUNT) ?
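
The block added above runs only when kmemcheck is enabled and the cache carries neither SLAB_NOTRACK nor any of the debug flags; it allocates shadow pages for the fresh slab and then marks the whole slab either "uninitialized" (caches with a constructor, whose objects are never memset at allocation time) or "unallocated" (everything else). Below is a rough, compilable sketch of that decision only; the names shadow_state and mark_new_slab are invented, and one state per slab is far coarser than the per-byte state kmemcheck's real shadow pages record.

        #include <stdio.h>

        /* Illustrative only: one state per slab, not kmemcheck's per-byte shadow. */
        enum shadow_state {
                SHADOW_UNALLOCATED,     /* no live object here yet */
                SHADOW_UNINITIALIZED    /* object exists, but its bytes were never written */
        };

        /*
         * Mirrors the decision in the hunk: a cache with a constructor hands out
         * objects that already exist (the ctor ran, nothing clears them later),
         * so their contents must be treated as uninitialized data; a ctor-less
         * cache starts out with no objects at all.
         */
        static enum shadow_state mark_new_slab(int has_ctor)
        {
                return has_ctor ? SHADOW_UNINITIALIZED : SHADOW_UNALLOCATED;
        }

        int main(void)
        {
                printf("ctor cache: %d, plain cache: %d\n",
                       mark_new_slab(1), mark_new_slab(0));
                return 0;
        }
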
@@ -1171,6 +1199,8 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
                 __ClearPageSlubDebug(page);
         }
 
+        kmemcheck_free_shadow(page, compound_order(page));
+
         mod_zone_page_state(page_zone(page),
                 (s->flags & SLAB_RECLAIM_ACCOUNT) ?
                 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
@@ -1662,6 +1692,8 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
         unsigned long flags;
         unsigned int objsize;
 
+        gfpflags &= slab_gfp_mask;
+
         lockdep_trace_alloc(gfpflags);
         might_sleep_if(gfpflags & __GFP_WAIT);
 
@@ -1685,6 +1717,9 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
         if (unlikely((gfpflags & __GFP_ZERO) && object))
                 memset(object, 0, objsize);
 
+        kmemcheck_slab_alloc(s, gfpflags, object, c->objsize);
+        kmemleak_alloc_recursive(object, objsize, 1, s->flags, gfpflags);
+
         return object;
 }
 
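
Note the ordering in slab_alloc(): the __GFP_ZERO memset happens first, and only then are kmemcheck and kmemleak told about the object, so the trackers see it in its final state. A minimal userspace sketch of the same zero-then-track ordering; track_alloc() and alloc_hooked() are invented stand-ins here, not kernel API.

        #include <stdlib.h>
        #include <string.h>

        /* Invented stand-in for kmemcheck_slab_alloc()/kmemleak_alloc_recursive(). */
        static void track_alloc(void *obj, size_t size)
        {
                (void)obj;
                (void)size;
                /* a real tracker would record obj here and start watching its bytes */
        }

        /* Zero first when asked to, then tell the trackers about the object. */
        static void *alloc_hooked(size_t size, int want_zero)
        {
                void *obj = malloc(size);

                if (obj && want_zero)
                        memset(obj, 0, size);
                track_alloc(obj, size);
                return obj;
        }

        int main(void)
        {
                free(alloc_hooked(64, 1));
                return 0;
        }
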
@@ -1814,8 +1849,10 @@ static __always_inline void slab_free(struct kmem_cache *s,
         struct kmem_cache_cpu *c;
         unsigned long flags;
 
+        kmemleak_free_recursive(x, s->flags);
         local_irq_save(flags);
         c = get_cpu_slab(s, smp_processor_id());
+        kmemcheck_slab_free(s, object, c->objsize);
         debug_check_no_locks_freed(object, c->objsize);
         if (!(s->flags & SLAB_DEBUG_OBJECTS))
                 debug_check_no_obj_freed(object, c->objsize);
@@ -2625,13 +2662,16 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
         if (gfp_flags & SLUB_DMA)
                 flags = SLAB_CACHE_DMA;
 
-        down_write(&slub_lock);
+        /*
+         * This function is called with IRQs disabled during early-boot on
+         * single CPU so there's no need to take slub_lock here.
+         */
         if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
                                                                 flags, NULL))
                 goto panic;
 
         list_add(&s->list, &slab_caches);
-        up_write(&slub_lock);
+
         if (sysfs_slab_add(s))
                 goto panic;
         return s;
@@ -2687,7 +2727,8 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
 
         if (!s || !text || !kmem_cache_open(s, flags, text,
                         realsize, ARCH_KMALLOC_MINALIGN,
-                        SLAB_CACHE_DMA|__SYSFS_ADD_DEFERRED, NULL)) {
+                        SLAB_CACHE_DMA|SLAB_NOTRACK|__SYSFS_ADD_DEFERRED,
+                        NULL)) {
                 kfree(s);
                 kfree(text);
                 goto unlock_out;
@@ -2781,9 +2822,10 @@ EXPORT_SYMBOL(__kmalloc);
 
 static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
-        struct page *page = alloc_pages_node(node, flags | __GFP_COMP,
-                                                get_order(size));
+        struct page *page;
 
+        flags |= __GFP_COMP | __GFP_NOTRACK;
+        page = alloc_pages_node(node, flags, get_order(size));
         if (page)
                 return page_address(page);
         else
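
kmalloc_large_node() bypasses the slab layer and goes straight to the page allocator; the rewrite folds both implicit flags into flags once (__GFP_COMP for a compound page, and __GFP_NOTRACK which, as in alloc_slab_page() above, keeps the underlying page allocation out of kmemcheck's tracking) before the single alloc_pages_node() call. A tiny illustration of folding flags ahead of one call, with made-up bit values rather than the kernel's __GFP_* masks:

        #include <stdio.h>

        /* Made-up bit values for the demo; the kernel's __GFP_* masks differ. */
        #define DEMO_GFP_COMP           0x1u
        #define DEMO_GFP_NOTRACK        0x2u

        /* Stand-in for alloc_pages_node(): just report the flags it was given. */
        static unsigned int demo_alloc_pages(unsigned int flags)
        {
                return flags;
        }

        int main(void)
        {
                unsigned int flags = 0x10u;     /* caller-supplied flags */

                /* fold the implicit flags in once, then make the single call */
                flags |= DEMO_GFP_COMP | DEMO_GFP_NOTRACK;
                printf("page allocator saw flags 0x%x\n", demo_alloc_pages(flags));
                return 0;
        }
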
@@ -3089,7 +3131,7 @@ void __init kmem_cache_init(void)
          * kmem_cache_open for slab_state == DOWN.
          */
         create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
-                sizeof(struct kmem_cache_node), GFP_KERNEL);
+                sizeof(struct kmem_cache_node), GFP_NOWAIT);
         kmalloc_caches[0].refcount = -1;
         caches++;
 
@@ -3102,16 +3144,16 @@
         /* Caches that are not of the two-to-the-power-of size */
         if (KMALLOC_MIN_SIZE <= 64) {
                 create_kmalloc_cache(&kmalloc_caches[1],
-                                "kmalloc-96", 96, GFP_KERNEL);
+                                "kmalloc-96", 96, GFP_NOWAIT);
                 caches++;
                 create_kmalloc_cache(&kmalloc_caches[2],
-                                "kmalloc-192", 192, GFP_KERNEL);
+                                "kmalloc-192", 192, GFP_NOWAIT);
                 caches++;
         }
 
         for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
                 create_kmalloc_cache(&kmalloc_caches[i],
-                        "kmalloc", 1 << i, GFP_KERNEL);
+                        "kmalloc", 1 << i, GFP_NOWAIT);
                 caches++;
         }
 
@@ -3148,7 +3190,7 @@
         /* Provide the correct kmalloc names now that the caches are up */
         for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
                 kmalloc_caches[i]. name =
-                        kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
+                        kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
 
 #ifdef CONFIG_SMP
         register_cpu_notifier(&slab_notifier);
@@ -3166,6 +3208,14 @@
                 nr_cpu_ids, nr_node_ids);
 }
 
+void __init kmem_cache_init_late(void)
+{
+        /*
+         * Interrupts are enabled now so all GFP allocations are safe.
+         */
+        slab_gfp_mask = __GFP_BITS_MASK;
+}
+
 /*
  * Find a mergeable slab cache
  */
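
Taken together with the slab_gfp_mask definition and the gfpflags &= slab_gfp_mask clamp in slab_alloc() above, this gives a two-phase scheme: during early boot every allocation is silently stripped down to flags that cannot sleep or re-enable interrupts, and once kmem_cache_init_late() runs the mask is widened so the caller's flags pass through untouched. Here is a compilable toy version of the same gate; the mask values are invented for the demo (the real SLAB_GFP_BOOT_MASK and __GFP_BITS_MASK are defined elsewhere in the kernel and keep more bits than shown).

        #include <assert.h>

        /* Invented flag bits for the demo; not the kernel's gfp_t values. */
        #define DEMO_GFP_WAIT   0x1u    /* may sleep */
        #define DEMO_GFP_IO     0x2u    /* may start I/O */
        #define DEMO_GFP_KERNEL (DEMO_GFP_WAIT | DEMO_GFP_IO)

        #define DEMO_BOOT_MASK  0x0u    /* early boot: strip everything risky */
        #define DEMO_FULL_MASK  ~0u     /* after late init: let all flags through */

        static unsigned int slab_gfp_mask = DEMO_BOOT_MASK;

        /* stand-in for the clamp at the top of slab_alloc() */
        static unsigned int clamp_gfp(unsigned int gfpflags)
        {
                return gfpflags & slab_gfp_mask;
        }

        /* stand-in for kmem_cache_init_late(): interrupts are on, lift the limit */
        static void demo_init_late(void)
        {
                slab_gfp_mask = DEMO_FULL_MASK;
        }

        int main(void)
        {
                /* early boot: a GFP_KERNEL-style request loses its sleeping flags */
                assert(clamp_gfp(DEMO_GFP_KERNEL) == 0);

                demo_init_late();

                /* after late init the same request passes through unchanged */
                assert(clamp_gfp(DEMO_GFP_KERNEL) == DEMO_GFP_KERNEL);
                return 0;
        }
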
@@ -3764,7 +3814,7 @@ static int list_locations(struct kmem_cache *s, char *buf,
                                 to_cpumask(l->cpus));
                 }
 
-                if (num_online_nodes() > 1 && !nodes_empty(l->nodes) &&
+                if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
                                 len < PAGE_SIZE - 60) {
                         len += sprintf(buf + len, " nodes=");
                         len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
@@ -4439,6 +4489,8 @@ static char *create_unique_id(struct kmem_cache *s)
                 *p++ = 'a';
         if (s->flags & SLAB_DEBUG_FREE)
                 *p++ = 'F';
+        if (!(s->flags & SLAB_NOTRACK))
+                *p++ = 't';
         if (p != name + 1)
                 *p++ = '-';
         p += sprintf(p, "%07d", s->size);
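
create_unique_id() encodes a cache's properties and object size into the name used for its sysfs entry, one character per property; with this change a cache that kmemcheck tracks (i.e. one without SLAB_NOTRACK) also gets a 't' in that encoding, so tracked and untracked caches stay distinguishable. A small standalone illustration of the same prefix-building pattern; the DEMO_* flag bits are invented for the example, not the kernel's SLAB_* values.

        #include <stdio.h>

        /* Invented flag bits for the demo; the kernel's SLAB_* flags differ. */
        #define DEMO_CACHE_DMA  0x1u
        #define DEMO_DEBUG_FREE 0x2u
        #define DEMO_NOTRACK    0x4u

        /* Mirrors the shape of create_unique_id(): one character per property. */
        static void build_id(char *buf, unsigned int flags, unsigned int size)
        {
                char *p = buf;

                *p++ = ':';
                if (flags & DEMO_CACHE_DMA)
                        *p++ = 'd';
                if (flags & DEMO_DEBUG_FREE)
                        *p++ = 'F';
                if (!(flags & DEMO_NOTRACK))
                        *p++ = 't';     /* tracked caches get their own namespace */
                if (p != buf + 1)
                        *p++ = '-';
                sprintf(p, "%07u", size);
        }

        int main(void)
        {
                char id[32];

                build_id(id, DEMO_CACHE_DMA, 192);
                printf("%s\n", id);     /* prints ":dt-0000192" */
                return 0;
        }
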