 mm/slab.c | 20 ++++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -114,6 +114,7 @@
 #include <linux/rtmutex.h>
 #include <linux/reciprocal_div.h>
 #include <linux/debugobjects.h>
+#include <linux/kmemcheck.h>
 
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
@@ -179,13 +180,13 @@
 			 SLAB_STORE_USER | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
+			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
 #else
 # define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
 			 SLAB_CACHE_DMA | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
+			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
 #endif
 
 /*
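Both CREATE_MASK variants (with and without CONFIG_DEBUG_SLAB) gain SLAB_NOTRACK, the flag that lets a cache opt out of kmemcheck tracking; it has to be whitelisted here because slab.c refuses cache creation with flags outside CREATE_MASK. A minimal sketch of a caller opting out, with a hypothetical cache name and object size:

	/* Hypothetical cache whose objects kmemcheck will never track. */
	struct kmem_cache *example_cache;

	example_cache = kmem_cache_create("example_cache", 256, 0,
					  SLAB_NOTRACK, NULL);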
@@ -1624,6 +1625,10 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 			NR_SLAB_UNRECLAIMABLE, nr_pages);
 	for (i = 0; i < nr_pages; i++)
 		__SetPageSlab(page + i);
+
+	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK))
+		kmemcheck_alloc_shadow(cachep, flags, nodeid, page, cachep->gfporder);
+
 	return page_address(page);
 }
 
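kmem_getpages() is where a slab's backing pages come from, so it is the natural place to allocate kmemcheck's shadow pages, which record the initialization status of every byte in the slab. The hook is skipped when tracking is off globally (kmemcheck_enabled) or per cache (SLAB_NOTRACK). With CONFIG_KMEMCHECK unset, <linux/kmemcheck.h> is expected to stub the hooks out so the branch compiles away entirely; a sketch of that usual pattern, mirroring the call site's signature (the real declarations may differ):

	#ifdef CONFIG_KMEMCHECK
	extern int kmemcheck_enabled;
	void kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags,
				    int node, struct page *page, int order);
	#else
	#define kmemcheck_enabled 0
	static inline void
	kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags,
			       int node, struct page *page, int order)
	{
	}
	#endif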
@@ -1636,6 +1641,9 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
 	struct page *page = virt_to_page(addr);
 	const unsigned long nr_freed = i;
 
+	if (kmemcheck_page_is_tracked(page))
+		kmemcheck_free_shadow(cachep, page, cachep->gfporder);
+
 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
 		sub_zone_page_state(page_zone(page),
 				NR_SLAB_RECLAIMABLE, nr_freed);
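Note the asymmetry with the allocation side: the free path asks the page itself, via kmemcheck_page_is_tracked(), instead of re-checking kmemcheck_enabled and the cache flags. kmemcheck can be toggled at runtime, so a shadow allocated while tracking was on must still be freed even if tracking has since been switched off; the per-page state is what counts.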
@@ -3309,6 +3317,9 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
 				 flags);
 
+	if (likely(ptr))
+		kmemcheck_slab_alloc(cachep, flags, ptr, obj_size(cachep));
+
 	if (unlikely((flags & __GFP_ZERO) && ptr))
 		memset(ptr, 0, obj_size(cachep));
 
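In the NUMA allocation path, the object is handed to kmemcheck only when the allocation actually succeeded, hence the likely(ptr) guard. The hook deliberately runs before the __GFP_ZERO memset below it, so kmemcheck sees the zeroing as an ordinary initializing write. The non-NUMA path gets the identical treatment next.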
@@ -3367,6 +3378,9 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 				 flags);
 	prefetchw(objp);
 
+	if (likely(objp))
+		kmemcheck_slab_alloc(cachep, flags, objp, obj_size(cachep));
+
 	if (unlikely((flags & __GFP_ZERO) && objp))
 		memset(objp, 0, obj_size(cachep));
 
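With both allocation paths hooked, every successful slab allocation is registered with kmemcheck before the caller ever sees the pointer. A hypothetical illustration of what that buys (struct and variable names are made up): with kmemcheck active, the first read below should be reported as a use of uninitialized memory, while the kzalloc'd object stays quiet.

	struct foo {
		int a;
		int b;
	};

	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);	/* tracked, uninitialized */
	struct foo *q = kzalloc(sizeof(*q), GFP_KERNEL);	/* tracked, zeroed */
	int x, y;

	p->a = 1;
	x = p->b;	/* b was never written: kmemcheck flags this load */
	y = q->b;	/* zero-initialized at allocation: no report */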
@@ -3483,6 +3497,8 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
 	kmemleak_free_recursive(objp, cachep->flags);
 	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
 
+	kmemcheck_slab_free(cachep, objp, obj_size(cachep));
+
 	/*
 	 * Skip calling cache_free_alien() when the platform is not numa.
 	 * This will avoid cache misses that happen while accessing slabp (which
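Finally, __cache_free() notifies kmemcheck unconditionally; for pages without a shadow the call should reduce to a no-op. Presumably the point of hooking the free path is to let kmemcheck re-mark the object's bytes as uninitialized, so that later reads through a stale pointer surface as errors, which is how use-after-free on slab objects shows up under this checker.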