diff options
Diffstat (limited to 'mm/slub.c')
-rw-r--r-- | mm/slub.c | 97 |
1 file changed, 82 insertions, 15 deletions
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
17 | #include <linux/proc_fs.h> | 17 | #include <linux/proc_fs.h> |
18 | #include <linux/seq_file.h> | 18 | #include <linux/seq_file.h> |
19 | #include <trace/kmemtrace.h> | ||
19 | #include <linux/cpu.h> | 20 | #include <linux/cpu.h> |
20 | #include <linux/cpuset.h> | 21 | #include <linux/cpuset.h> |
21 | #include <linux/mempolicy.h> | 22 | #include <linux/mempolicy.h> |
@@ -1623,18 +1624,46 @@ static __always_inline void *slab_alloc(struct kmem_cache *s, | |||
1623 | 1624 | ||
1624 | void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) | 1625 | void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) |
1625 | { | 1626 | { |
1626 | return slab_alloc(s, gfpflags, -1, _RET_IP_); | 1627 | void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_); |
1628 | |||
1629 | kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret, | ||
1630 | s->objsize, s->size, gfpflags); | ||
1631 | |||
1632 | return ret; | ||
1627 | } | 1633 | } |
1628 | EXPORT_SYMBOL(kmem_cache_alloc); | 1634 | EXPORT_SYMBOL(kmem_cache_alloc); |
1629 | 1635 | ||
1636 | #ifdef CONFIG_KMEMTRACE | ||
1637 | void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags) | ||
1638 | { | ||
1639 | return slab_alloc(s, gfpflags, -1, _RET_IP_); | ||
1640 | } | ||
1641 | EXPORT_SYMBOL(kmem_cache_alloc_notrace); | ||
1642 | #endif | ||
1643 | |||
1630 | #ifdef CONFIG_NUMA | 1644 | #ifdef CONFIG_NUMA |
1631 | void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) | 1645 | void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) |
1632 | { | 1646 | { |
1633 | return slab_alloc(s, gfpflags, node, _RET_IP_); | 1647 | void *ret = slab_alloc(s, gfpflags, node, _RET_IP_); |
1648 | |||
1649 | kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret, | ||
1650 | s->objsize, s->size, gfpflags, node); | ||
1651 | |||
1652 | return ret; | ||
1634 | } | 1653 | } |
1635 | EXPORT_SYMBOL(kmem_cache_alloc_node); | 1654 | EXPORT_SYMBOL(kmem_cache_alloc_node); |
1636 | #endif | 1655 | #endif |
1637 | 1656 | ||
1657 | #ifdef CONFIG_KMEMTRACE | ||
1658 | void *kmem_cache_alloc_node_notrace(struct kmem_cache *s, | ||
1659 | gfp_t gfpflags, | ||
1660 | int node) | ||
1661 | { | ||
1662 | return slab_alloc(s, gfpflags, node, _RET_IP_); | ||
1663 | } | ||
1664 | EXPORT_SYMBOL(kmem_cache_alloc_node_notrace); | ||
1665 | #endif | ||
1666 | |||
1638 | /* | 1667 | /* |
1639 | * Slow path handling. This may still be called frequently since objects | 1668 | * Slow path handling. This may still be called frequently since objects |
1640 | * have a longer lifetime than the cpu slabs in most processing loads. | 1669 | * have a longer lifetime than the cpu slabs in most processing loads. |
@@ -1742,6 +1771,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x) | |||
1742 | page = virt_to_head_page(x); | 1771 | page = virt_to_head_page(x); |
1743 | 1772 | ||
1744 | slab_free(s, page, x, _RET_IP_); | 1773 | slab_free(s, page, x, _RET_IP_); |
1774 | |||
1775 | kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, x); | ||
1745 | } | 1776 | } |
1746 | EXPORT_SYMBOL(kmem_cache_free); | 1777 | EXPORT_SYMBOL(kmem_cache_free); |
1747 | 1778 | ||
@@ -2475,7 +2506,7 @@ EXPORT_SYMBOL(kmem_cache_destroy); | |||
2475 | * Kmalloc subsystem | 2506 | * Kmalloc subsystem |
2476 | *******************************************************************/ | 2507 | *******************************************************************/ |
2477 | 2508 | ||
2478 | struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned; | 2509 | struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned; |
2479 | EXPORT_SYMBOL(kmalloc_caches); | 2510 | EXPORT_SYMBOL(kmalloc_caches); |
2480 | 2511 | ||
2481 | static int __init setup_slub_min_order(char *str) | 2512 | static int __init setup_slub_min_order(char *str) |
@@ -2537,7 +2568,7 @@ panic: | |||
2537 | } | 2568 | } |
2538 | 2569 | ||
2539 | #ifdef CONFIG_ZONE_DMA | 2570 | #ifdef CONFIG_ZONE_DMA |
2540 | static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1]; | 2571 | static struct kmem_cache *kmalloc_caches_dma[SLUB_PAGE_SHIFT]; |
2541 | 2572 | ||
2542 | static void sysfs_add_func(struct work_struct *w) | 2573 | static void sysfs_add_func(struct work_struct *w) |
2543 | { | 2574 | { |
@@ -2657,8 +2688,9 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags) | |||
2657 | void *__kmalloc(size_t size, gfp_t flags) | 2688 | void *__kmalloc(size_t size, gfp_t flags) |
2658 | { | 2689 | { |
2659 | struct kmem_cache *s; | 2690 | struct kmem_cache *s; |
2691 | void *ret; | ||
2660 | 2692 | ||
2661 | if (unlikely(size > PAGE_SIZE)) | 2693 | if (unlikely(size > SLUB_MAX_SIZE)) |
2662 | return kmalloc_large(size, flags); | 2694 | return kmalloc_large(size, flags); |
2663 | 2695 | ||
2664 | s = get_slab(size, flags); | 2696 | s = get_slab(size, flags); |
@@ -2666,7 +2698,12 @@ void *__kmalloc(size_t size, gfp_t flags) | |||
2666 | if (unlikely(ZERO_OR_NULL_PTR(s))) | 2698 | if (unlikely(ZERO_OR_NULL_PTR(s))) |
2667 | return s; | 2699 | return s; |
2668 | 2700 | ||
2669 | return slab_alloc(s, flags, -1, _RET_IP_); | 2701 | ret = slab_alloc(s, flags, -1, _RET_IP_); |
2702 | |||
2703 | kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret, | ||
2704 | size, s->size, flags); | ||
2705 | |||
2706 | return ret; | ||
2670 | } | 2707 | } |
2671 | EXPORT_SYMBOL(__kmalloc); | 2708 | EXPORT_SYMBOL(__kmalloc); |
2672 | 2709 | ||
@@ -2685,16 +2722,30 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node) | |||
2685 | void *__kmalloc_node(size_t size, gfp_t flags, int node) | 2722 | void *__kmalloc_node(size_t size, gfp_t flags, int node) |
2686 | { | 2723 | { |
2687 | struct kmem_cache *s; | 2724 | struct kmem_cache *s; |
2725 | void *ret; | ||
2726 | |||
2727 | if (unlikely(size > SLUB_MAX_SIZE)) { | ||
2728 | ret = kmalloc_large_node(size, flags, node); | ||
2688 | 2729 | ||
2689 | if (unlikely(size > PAGE_SIZE)) | 2730 | kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, |
2690 | return kmalloc_large_node(size, flags, node); | 2731 | _RET_IP_, ret, |
2732 | size, PAGE_SIZE << get_order(size), | ||
2733 | flags, node); | ||
2734 | |||
2735 | return ret; | ||
2736 | } | ||
2691 | 2737 | ||
2692 | s = get_slab(size, flags); | 2738 | s = get_slab(size, flags); |
2693 | 2739 | ||
2694 | if (unlikely(ZERO_OR_NULL_PTR(s))) | 2740 | if (unlikely(ZERO_OR_NULL_PTR(s))) |
2695 | return s; | 2741 | return s; |
2696 | 2742 | ||
2697 | return slab_alloc(s, flags, node, _RET_IP_); | 2743 | ret = slab_alloc(s, flags, node, _RET_IP_); |
2744 | |||
2745 | kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret, | ||
2746 | size, s->size, flags, node); | ||
2747 | |||
2748 | return ret; | ||
2698 | } | 2749 | } |
2699 | EXPORT_SYMBOL(__kmalloc_node); | 2750 | EXPORT_SYMBOL(__kmalloc_node); |
2700 | #endif | 2751 | #endif |
@@ -2753,6 +2804,8 @@ void kfree(const void *x) | |||
2753 | return; | 2804 | return; |
2754 | } | 2805 | } |
2755 | slab_free(page->slab, page, object, _RET_IP_); | 2806 | slab_free(page->slab, page, object, _RET_IP_); |
2807 | |||
2808 | kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, x); | ||
2756 | } | 2809 | } |
2757 | EXPORT_SYMBOL(kfree); | 2810 | EXPORT_SYMBOL(kfree); |
2758 | 2811 | ||
@@ -2986,7 +3039,7 @@ void __init kmem_cache_init(void) | |||
2986 | caches++; | 3039 | caches++; |
2987 | } | 3040 | } |
2988 | 3041 | ||
2989 | for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) { | 3042 | for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) { |
2990 | create_kmalloc_cache(&kmalloc_caches[i], | 3043 | create_kmalloc_cache(&kmalloc_caches[i], |
2991 | "kmalloc", 1 << i, GFP_KERNEL); | 3044 | "kmalloc", 1 << i, GFP_KERNEL); |
2992 | caches++; | 3045 | caches++; |
@@ -3023,7 +3076,7 @@ void __init kmem_cache_init(void) | |||
3023 | slab_state = UP; | 3076 | slab_state = UP; |
3024 | 3077 | ||
3025 | /* Provide the correct kmalloc names now that the caches are up */ | 3078 | /* Provide the correct kmalloc names now that the caches are up */ |
3026 | for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) | 3079 | for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) |
3027 | kmalloc_caches[i]. name = | 3080 | kmalloc_caches[i]. name = |
3028 | kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i); | 3081 | kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i); |
3029 | 3082 | ||
@@ -3222,8 +3275,9 @@ static struct notifier_block __cpuinitdata slab_notifier = { | |||
3222 | void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller) | 3275 | void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller) |
3223 | { | 3276 | { |
3224 | struct kmem_cache *s; | 3277 | struct kmem_cache *s; |
3278 | void *ret; | ||
3225 | 3279 | ||
3226 | if (unlikely(size > PAGE_SIZE)) | 3280 | if (unlikely(size > SLUB_MAX_SIZE)) |
3227 | return kmalloc_large(size, gfpflags); | 3281 | return kmalloc_large(size, gfpflags); |
3228 | 3282 | ||
3229 | s = get_slab(size, gfpflags); | 3283 | s = get_slab(size, gfpflags); |
@@ -3231,15 +3285,22 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller) | |||
3231 | if (unlikely(ZERO_OR_NULL_PTR(s))) | 3285 | if (unlikely(ZERO_OR_NULL_PTR(s))) |
3232 | return s; | 3286 | return s; |
3233 | 3287 | ||
3234 | return slab_alloc(s, gfpflags, -1, caller); | 3288 | ret = slab_alloc(s, gfpflags, -1, caller); |
3289 | |||
3290 | /* Honor the call site pointer we received. */ | ||
3291 | kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, caller, ret, size, | ||
3292 | s->size, gfpflags); | ||
3293 | |||
3294 | return ret; | ||
3235 | } | 3295 | } |
3236 | 3296 | ||
3237 | void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, | 3297 | void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, |
3238 | int node, unsigned long caller) | 3298 | int node, unsigned long caller) |
3239 | { | 3299 | { |
3240 | struct kmem_cache *s; | 3300 | struct kmem_cache *s; |
3301 | void *ret; | ||
3241 | 3302 | ||
3242 | if (unlikely(size > PAGE_SIZE)) | 3303 | if (unlikely(size > SLUB_MAX_SIZE)) |
3243 | return kmalloc_large_node(size, gfpflags, node); | 3304 | return kmalloc_large_node(size, gfpflags, node); |
3244 | 3305 | ||
3245 | s = get_slab(size, gfpflags); | 3306 | s = get_slab(size, gfpflags); |
@@ -3247,7 +3308,13 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, | |||
3247 | if (unlikely(ZERO_OR_NULL_PTR(s))) | 3308 | if (unlikely(ZERO_OR_NULL_PTR(s))) |
3248 | return s; | 3309 | return s; |
3249 | 3310 | ||
3250 | return slab_alloc(s, gfpflags, node, caller); | 3311 | ret = slab_alloc(s, gfpflags, node, caller); |
3312 | |||
3313 | /* Honor the call site pointer we received. */ | ||
3314 | kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, caller, ret, | ||
3315 | size, s->size, gfpflags, node); | ||
3316 | |||
3317 | return ret; | ||
3251 | } | 3318 | } |
3252 | 3319 | ||
3253 | #ifdef CONFIG_SLUB_DEBUG | 3320 | #ifdef CONFIG_SLUB_DEBUG |