Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c | 108
1 file changed, 24 insertions(+), 84 deletions(-)
@@ -114,6 +114,7 @@
 #include <linux/rtmutex.h>
 #include <linux/reciprocal_div.h>
 #include <linux/debugobjects.h>
+#include <linux/kmemcheck.h>
 
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
@@ -179,13 +180,13 @@
 		 SLAB_STORE_USER | \
 		 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 		 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-		 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
+		 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
 #else
 # define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
 			 SLAB_CACHE_DMA | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
+			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
 #endif
 
 /*
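The only functional change in this hunk is that SLAB_NOTRACK now belongs to CREATE_MASK in both the debug and non-debug builds, so the flag-validity check in kmem_cache_create() will accept caches that opt out of kmemcheck tracking. Below is a minimal sketch of such an opt-out cache; the object type, cache name, and init/exit functions are hypothetical and not part of this patch.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

/* Hypothetical object type, for illustration only. */
struct example_obj {
	int id;
	void *payload;
};

static struct kmem_cache *example_cachep;

static int __init example_cache_init(void)
{
	/*
	 * SLAB_NOTRACK asks kmemcheck to leave this cache alone; it is
	 * only a valid creation flag because the hunk above adds it to
	 * CREATE_MASK.
	 */
	example_cachep = kmem_cache_create("example_cache",
					   sizeof(struct example_obj), 0,
					   SLAB_NOTRACK, NULL);
	return example_cachep ? 0 : -ENOMEM;
}

static void __exit example_cache_exit(void)
{
	kmem_cache_destroy(example_cachep);
}

module_init(example_cache_init);
module_exit(example_cache_exit);
MODULE_LICENSE("GPL");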
@@ -380,87 +381,6 @@ static void kmem_list3_init(struct kmem_list3 *parent)
 	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
 	} while (0)
 
-/*
- * struct kmem_cache
- *
- * manages a cache.
- */
-
-struct kmem_cache {
-/* 1) per-cpu data, touched during every alloc/free */
-	struct array_cache *array[NR_CPUS];
-/* 2) Cache tunables. Protected by cache_chain_mutex */
-	unsigned int batchcount;
-	unsigned int limit;
-	unsigned int shared;
-
-	unsigned int buffer_size;
-	u32 reciprocal_buffer_size;
-/* 3) touched by every alloc & free from the backend */
-
-	unsigned int flags;		/* constant flags */
-	unsigned int num;		/* # of objs per slab */
-
-/* 4) cache_grow/shrink */
-	/* order of pgs per slab (2^n) */
-	unsigned int gfporder;
-
-	/* force GFP flags, e.g. GFP_DMA */
-	gfp_t gfpflags;
-
-	size_t colour;			/* cache colouring range */
-	unsigned int colour_off;	/* colour offset */
-	struct kmem_cache *slabp_cache;
-	unsigned int slab_size;
-	unsigned int dflags;		/* dynamic flags */
-
-	/* constructor func */
-	void (*ctor)(void *obj);
-
-/* 5) cache creation/removal */
-	const char *name;
-	struct list_head next;
-
-/* 6) statistics */
-#if STATS
-	unsigned long num_active;
-	unsigned long num_allocations;
-	unsigned long high_mark;
-	unsigned long grown;
-	unsigned long reaped;
-	unsigned long errors;
-	unsigned long max_freeable;
-	unsigned long node_allocs;
-	unsigned long node_frees;
-	unsigned long node_overflow;
-	atomic_t allochit;
-	atomic_t allocmiss;
-	atomic_t freehit;
-	atomic_t freemiss;
-#endif
-#if DEBUG
-	/*
-	 * If debugging is enabled, then the allocator can add additional
-	 * fields and/or padding to every object. buffer_size contains the total
-	 * object size including these internal fields, the following two
-	 * variables contain the offset to the user object and its size.
-	 */
-	int obj_offset;
-	int obj_size;
-#endif
-	/*
-	 * We put nodelists[] at the end of kmem_cache, because we want to size
-	 * this array to nr_node_ids slots instead of MAX_NUMNODES
-	 * (see kmem_cache_init())
-	 * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
-	 * is statically defined, so we reserve the max number of nodes.
-	 */
-	struct kmem_list3 *nodelists[MAX_NUMNODES];
-	/*
-	 * Do not add fields after nodelists[]
-	 */
-};
-
 #define CFLGS_OFF_SLAB		(0x80000000UL)
 #define OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)
 
@@ -1704,7 +1624,7 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
 		flags |= __GFP_RECLAIMABLE;
 
-	page = alloc_pages_exact_node(nodeid, flags, cachep->gfporder);
+	page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
 	if (!page)
 		return NULL;
 
@@ -1717,6 +1637,16 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 			NR_SLAB_UNRECLAIMABLE, nr_pages);
 	for (i = 0; i < nr_pages; i++)
 		__SetPageSlab(page + i);
+
+	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
+		kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
+
+		if (cachep->ctor)
+			kmemcheck_mark_uninitialized_pages(page, nr_pages);
+		else
+			kmemcheck_mark_unallocated_pages(page, nr_pages);
+	}
+
 	return page_address(page);
 }
 
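In kmem_getpages(), the new block allocates kmemcheck's shadow pages whenever tracking is enabled and the cache has not opted out via SLAB_NOTRACK. Pages backing a cache with a constructor start out marked "uninitialized" (the constructor is expected to write them), everything else starts out "unallocated" until objects are handed out. The userspace sketch below illustrates the shadow-state idea only; the helper names are invented, and kmemcheck itself catches accesses by hiding pages from the page tables and trapping the resulting faults rather than by checking every read explicitly.

#include <stdio.h>

enum shadow_state {
	SHADOW_UNALLOCATED,	/* no object lives here yet */
	SHADOW_UNINITIALIZED,	/* allocated, but never written */
	SHADOW_INITIALIZED	/* written at least once */
};

#define BUF_SIZE 16

static unsigned char buf[BUF_SIZE];
static enum shadow_state shadow[BUF_SIZE];	/* one state per byte of buf */

static void shadow_mark_all(enum shadow_state state)
{
	for (int i = 0; i < BUF_SIZE; i++)
		shadow[i] = state;
}

static void tracked_write(int off, unsigned char val)
{
	buf[off] = val;
	shadow[off] = SHADOW_INITIALIZED;
}

static unsigned char tracked_read(int off)
{
	if (shadow[off] != SHADOW_INITIALIZED)
		fprintf(stderr, "read of uninitialized byte at offset %d\n", off);
	return buf[off];
}

int main(void)
{
	/* Analogue of kmem_getpages() marking fresh pages uninitialized. */
	shadow_mark_all(SHADOW_UNINITIALIZED);

	tracked_write(0, 42);
	tracked_read(0);	/* fine: byte 0 was written */
	tracked_read(1);	/* reported: byte 1 was never written */
	return 0;
}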
@@ -1729,6 +1659,8 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
 	struct page *page = virt_to_page(addr);
 	const unsigned long nr_freed = i;
 
+	kmemcheck_free_shadow(page, cachep->gfporder);
+
 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
 		sub_zone_page_state(page_zone(page),
 				NR_SLAB_RECLAIMABLE, nr_freed);
@@ -3404,6 +3336,9 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
 				 flags);
 
+	if (likely(ptr))
+		kmemcheck_slab_alloc(cachep, flags, ptr, obj_size(cachep));
+
 	if (unlikely((flags & __GFP_ZERO) && ptr))
 		memset(ptr, 0, obj_size(cachep));
 
@@ -3464,6 +3399,9 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 				 flags);
 	prefetchw(objp);
 
+	if (likely(objp))
+		kmemcheck_slab_alloc(cachep, flags, objp, obj_size(cachep));
+
 	if (unlikely((flags & __GFP_ZERO) && objp))
 		memset(objp, 0, obj_size(cachep));
 
@@ -3580,6 +3518,8 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
 	kmemleak_free_recursive(objp, cachep->flags);
 	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
 
+	kmemcheck_slab_free(cachep, objp, obj_size(cachep));
+
 	/*
 	 * Skip calling cache_free_alien() when the platform is not numa.
 	 * This will avoid cache misses that happen while accessing slabp (which
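As a closing observation on hook placement: both allocation paths invoke kmemcheck_slab_alloc() before the __GFP_ZERO memset, and the free path invokes kmemcheck_slab_free() after cache_free_debugcheck() but before the NUMA alien-cache handling. In each function the new call sits next to the existing kmemleak hook visible a few lines earlier in the context, which appears to be the pattern this patch follows for per-object tracking callbacks.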