Diffstat (limited to 'mm/slab.c')

-rw-r--r--	mm/slab.c	108
1 file changed, 24 insertions, 84 deletions

@@ -114,6 +114,7 @@
 #include <linux/rtmutex.h>
 #include <linux/reciprocal_div.h>
 #include <linux/debugobjects.h>
+#include <linux/kmemcheck.h>
 
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
@@ -179,13 +180,13 @@
 			 SLAB_STORE_USER | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
+			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
 #else
 # define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
 			 SLAB_CACHE_DMA | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
+			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
 #endif
 
 /*
@@ -380,87 +381,6 @@ static void kmem_list3_init(struct kmem_list3 *parent)
 	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
 	} while (0)
 
-/*
- * struct kmem_cache
- *
- * manages a cache.
- */
-
-struct kmem_cache {
-/* 1) per-cpu data, touched during every alloc/free */
-	struct array_cache *array[NR_CPUS];
-/* 2) Cache tunables. Protected by cache_chain_mutex */
-	unsigned int batchcount;
-	unsigned int limit;
-	unsigned int shared;
-
-	unsigned int buffer_size;
-	u32 reciprocal_buffer_size;
-/* 3) touched by every alloc & free from the backend */
-
-	unsigned int flags;		/* constant flags */
-	unsigned int num;		/* # of objs per slab */
-
-/* 4) cache_grow/shrink */
-	/* order of pgs per slab (2^n) */
-	unsigned int gfporder;
-
-	/* force GFP flags, e.g. GFP_DMA */
-	gfp_t gfpflags;
-
-	size_t colour;			/* cache colouring range */
-	unsigned int colour_off;	/* colour offset */
-	struct kmem_cache *slabp_cache;
-	unsigned int slab_size;
-	unsigned int dflags;		/* dynamic flags */
-
-	/* constructor func */
-	void (*ctor)(void *obj);
-
-/* 5) cache creation/removal */
-	const char *name;
-	struct list_head next;
-
-/* 6) statistics */
-#if STATS
-	unsigned long num_active;
-	unsigned long num_allocations;
-	unsigned long high_mark;
-	unsigned long grown;
-	unsigned long reaped;
-	unsigned long errors;
-	unsigned long max_freeable;
-	unsigned long node_allocs;
-	unsigned long node_frees;
-	unsigned long node_overflow;
-	atomic_t allochit;
-	atomic_t allocmiss;
-	atomic_t freehit;
-	atomic_t freemiss;
-#endif
-#if DEBUG
-	/*
-	 * If debugging is enabled, then the allocator can add additional
-	 * fields and/or padding to every object. buffer_size contains the total
-	 * object size including these internal fields, the following two
-	 * variables contain the offset to the user object and its size.
-	 */
-	int obj_offset;
-	int obj_size;
-#endif
-	/*
-	 * We put nodelists[] at the end of kmem_cache, because we want to size
-	 * this array to nr_node_ids slots instead of MAX_NUMNODES
-	 * (see kmem_cache_init())
-	 * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
-	 * is statically defined, so we reserve the max number of nodes.
-	 */
-	struct kmem_list3 *nodelists[MAX_NUMNODES];
-	/*
-	 * Do not add fields after nodelists[]
-	 */
-};
-
 #define CFLGS_OFF_SLAB		(0x80000000UL)
 #define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)
 
@@ -1707,7 +1627,7 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
 		flags |= __GFP_RECLAIMABLE;
 
-	page = alloc_pages_node(nodeid, flags, cachep->gfporder);
+	page = alloc_pages_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
 	if (!page)
 		return NULL;
 
@@ -1720,6 +1640,16 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 			NR_SLAB_UNRECLAIMABLE, nr_pages);
 	for (i = 0; i < nr_pages; i++)
 		__SetPageSlab(page + i);
+
+	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
+		kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
+
+		if (cachep->ctor)
+			kmemcheck_mark_uninitialized_pages(page, nr_pages);
+		else
+			kmemcheck_mark_unallocated_pages(page, nr_pages);
+	}
+
 	return page_address(page);
 }
 
@@ -1732,6 +1662,8 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
 	struct page *page = virt_to_page(addr);
 	const unsigned long nr_freed = i;
 
+	kmemcheck_free_shadow(page, cachep->gfporder);
+
 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
 		sub_zone_page_state(page_zone(page),
 				NR_SLAB_RECLAIMABLE, nr_freed);
@@ -3407,6 +3339,9 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
 				 flags);
 
+	if (likely(ptr))
+		kmemcheck_slab_alloc(cachep, flags, ptr, obj_size(cachep));
+
 	if (unlikely((flags & __GFP_ZERO) && ptr))
 		memset(ptr, 0, obj_size(cachep));
 
@@ -3467,6 +3402,9 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 				 flags);
 	prefetchw(objp);
 
+	if (likely(objp))
+		kmemcheck_slab_alloc(cachep, flags, objp, obj_size(cachep));
+
 	if (unlikely((flags & __GFP_ZERO) && objp))
 		memset(objp, 0, obj_size(cachep));
 
@@ -3583,6 +3521,8 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
 	kmemleak_free_recursive(objp, cachep->flags);
 	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
 
+	kmemcheck_slab_free(cachep, objp, obj_size(cachep));
+
 	/*
 	 * Skip calling cache_free_alien() when the platform is not numa.
 	 * This will avoid cache misses that happen while accessing slabp (which
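Note on usage (not part of the patch): with SLAB_NOTRACK now accepted by CREATE_MASK above, a cache that kmemcheck should ignore can simply pass the flag to kmem_cache_create(). The sketch below is only an illustration under that assumption; the cache name, object type, and init function are hypothetical and do not appear in this diff.

struct example_obj {
	int data;
};

static struct kmem_cache *example_cachep;

static int __init example_cache_init(void)
{
	/* SLAB_NOTRACK: ask kmemcheck to skip tracking objects of this cache */
	example_cachep = kmem_cache_create("example_notrack",
					   sizeof(struct example_obj),
					   0,			/* default alignment */
					   SLAB_PANIC | SLAB_NOTRACK,
					   NULL);		/* no constructor */
	return 0;	/* SLAB_PANIC means failure never returns here */
}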