Diffstat (limited to 'mm/slab.c')
 -rw-r--r--  mm/slab.c  82

 1 file changed, 30 insertions(+), 52 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -284,7 +284,7 @@ struct kmem_list3 {
  * Need this for bootstrapping a per node allocator.
  */
 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
-struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
+static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
 #define CACHE_CACHE 0
 #define SIZE_AC MAX_NUMNODES
 #define SIZE_L3 (2 * MAX_NUMNODES)
@@ -829,12 +829,12 @@ static void init_reap_node(int cpu)
 
 static void next_reap_node(void)
 {
-	int node = __get_cpu_var(slab_reap_node);
+	int node = __this_cpu_read(slab_reap_node);
 
 	node = next_node(node, node_online_map);
 	if (unlikely(node >= MAX_NUMNODES))
		node = first_node(node_online_map);
-	__get_cpu_var(slab_reap_node) = node;
+	__this_cpu_write(slab_reap_node, node);
 }
 
 #else
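For reference, the hunk above (and the reap_alien() hunk below) converts the __get_cpu_var() lvalue idiom to the explicit __this_cpu_read()/__this_cpu_write() accessors. A minimal hypothetical sketch of that pattern, with an invented per-CPU variable demo_counter:

#include <linux/percpu.h>

/*
 * Hypothetical example, not part of this patch: __get_cpu_var() exposed
 * the per-CPU variable as an lvalue; __this_cpu_read()/__this_cpu_write()
 * make the read and the write-back explicit operations.
 */
static DEFINE_PER_CPU(unsigned int, demo_counter);

static void demo_bump(void)
{
	unsigned int v = __this_cpu_read(demo_counter);

	/* Old style would have been: __get_cpu_var(demo_counter) = v + 1; */
	__this_cpu_write(demo_counter, v + 1);
}

On architectures with cheap per-CPU addressing the __this_cpu_*() forms can compile down to a single memory operation, which is the usual motivation for this kind of conversion.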
@@ -1012,7 +1012,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
  */
 static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
 {
-	int node = __get_cpu_var(slab_reap_node);
+	int node = __this_cpu_read(slab_reap_node);
 
 	if (l3->alien) {
		struct array_cache *ac = l3->alien[node];
@@ -1293,7 +1293,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
		 * anything expensive but will only modify reap_work
		 * and reschedule the timer.
		 */
-		cancel_rearming_delayed_work(&per_cpu(slab_reap_work, cpu));
+		cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
		/* Now the cache_reaper is guaranteed to be not running. */
		per_cpu(slab_reap_work, cpu).work.func = NULL;
		break;
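The hunk above swaps the deprecated cancel_rearming_delayed_work() for cancel_delayed_work_sync(), which also waits for a callback that is currently running and may be about to rearm itself. A hypothetical sketch of starting and stopping a self-rearming per-CPU delayed work in that style (the demo_* names are invented):

#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/workqueue.h>

/* Hypothetical sketch, not part of this patch. */
static DEFINE_PER_CPU(struct delayed_work, demo_reap_work);

static void demo_reaper(struct work_struct *w)
{
	struct delayed_work *dw = to_delayed_work(w);

	/* ... periodic housekeeping ... then rearm: */
	schedule_delayed_work(dw, HZ);
}

static void demo_start_reaper(int cpu)
{
	struct delayed_work *dw = &per_cpu(demo_reap_work, cpu);

	INIT_DELAYED_WORK(dw, demo_reaper);
	schedule_delayed_work_on(cpu, dw, HZ);
}

static void demo_stop_reaper(int cpu)
{
	/* Waits for a running callback, so it cannot rearm afterwards. */
	cancel_delayed_work_sync(&per_cpu(demo_reap_work, cpu));
	/* As in the hunk above: mark the work as dead. */
	per_cpu(demo_reap_work, cpu).work.func = NULL;
}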
@@ -2781,7 +2781,7 @@ static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
 /*
  * Map pages beginning at addr to the given cache and slab. This is required
  * for the slab allocator to be able to lookup the cache and slab of a
- * virtual address for kfree, ksize, kmem_ptr_validate, and slab debugging.
+ * virtual address for kfree, ksize, and slab debugging.
  */
 static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
			   void *addr)
@@ -3653,42 +3653,19 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+void *
+kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
 {
-	return __cache_alloc(cachep, flags, __builtin_return_address(0));
-}
-EXPORT_SYMBOL(kmem_cache_alloc_notrace);
-#endif
+	void *ret;
 
-/**
- * kmem_ptr_validate - check if an untrusted pointer might be a slab entry.
- * @cachep: the cache we're checking against
- * @ptr: pointer to validate
- *
- * This verifies that the untrusted pointer looks sane;
- * it is _not_ a guarantee that the pointer is actually
- * part of the slab cache in question, but it at least
- * validates that the pointer can be dereferenced and
- * looks half-way sane.
- *
- * Currently only used for dentry validation.
- */
-int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr)
-{
-	unsigned long size = cachep->buffer_size;
-	struct page *page;
+	ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
 
-	if (unlikely(!kern_ptr_validate(ptr, size)))
-		goto out;
-	page = virt_to_page(ptr);
-	if (unlikely(!PageSlab(page)))
-		goto out;
-	if (unlikely(page_get_cache(page) != cachep))
-		goto out;
-	return 1;
-out:
-	return 0;
+	trace_kmalloc(_RET_IP_, ret,
+		      size, slab_buffer_size(cachep), flags);
+	return ret;
 }
+EXPORT_SYMBOL(kmem_cache_alloc_trace);
+#endif
 
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
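In the hunk above, the CONFIG_TRACING entry point is renamed from kmem_cache_alloc_notrace() to kmem_cache_alloc_trace() and now takes the requested size, so the trace_kmalloc event can record the requested size alongside the cache's object size (slab_buffer_size(cachep)) at the allocation site. A hypothetical caller sketch in the same spirit (demo_kmalloc is invented; kmem_find_general_cachep() and ZERO_OR_NULL_PTR() are existing slab helpers):

#include <linux/slab.h>

/*
 * Hypothetical caller, not part of this patch: a kmalloc-style wrapper
 * hands the *requested* size down, and kmem_cache_alloc_trace() both
 * performs the allocation and emits the kmalloc trace event.
 */
static __always_inline void *demo_kmalloc(size_t size, gfp_t flags)
{
	struct kmem_cache *cachep;

	cachep = kmem_find_general_cachep(size, flags);
	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
		return cachep;
	return kmem_cache_alloc_trace(size, cachep, flags);
}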
@@ -3705,31 +3682,32 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
-				    gfp_t flags,
-				    int nodeid)
+void *kmem_cache_alloc_node_trace(size_t size,
+				  struct kmem_cache *cachep,
+				  gfp_t flags,
+				  int nodeid)
 {
-	return __cache_alloc_node(cachep, flags, nodeid,
+	void *ret;
+
+	ret = __cache_alloc_node(cachep, flags, nodeid,
				  __builtin_return_address(0));
+	trace_kmalloc_node(_RET_IP_, ret,
+			   size, slab_buffer_size(cachep),
+			   flags, nodeid);
+	return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
 #endif
 
 static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 {
	struct kmem_cache *cachep;
-	void *ret;
 
	cachep = kmem_find_general_cachep(size, flags);
	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
		return cachep;
-	ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
-
-	trace_kmalloc_node((unsigned long) caller, ret,
-			   size, cachep->buffer_size, flags, node);
-
-	return ret;
+	return kmem_cache_alloc_node_trace(size, cachep, flags, node);
 }
 
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
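The hunk above applies the same treatment to the NUMA path: kmem_cache_alloc_node_trace() now performs the allocation and emits trace_kmalloc_node itself, so __do_kmalloc_node() shrinks to a cache lookup plus one call. A hypothetical node-targeted caller (demo_alloc_near is invented):

#include <linux/slab.h>

/*
 * Hypothetical caller, not part of this patch: allocate an object of the
 * requested size near a given NUMA node through the traced entry point.
 */
static void *demo_alloc_near(size_t size, int node)
{
	struct kmem_cache *cachep;

	cachep = kmem_find_general_cachep(size, GFP_KERNEL);
	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
		return cachep;
	return kmem_cache_alloc_node_trace(size, cachep, GFP_KERNEL, node);
}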
@@ -4075,7 +4053,7 @@ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
  * necessary. Note that the l3 listlock also protects the array_cache
  * if drain_array() is used on the shared array.
  */
-void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
+static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
			 struct array_cache *ac, int force, int node)
 {
	int tofree;
@@ -4339,7 +4317,7 @@ static const struct seq_operations slabinfo_op = {
  * @count: data length
  * @ppos: unused
  */
-ssize_t slabinfo_write(struct file *file, const char __user * buffer,
+static ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos)
 {
	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
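slabinfo_write() remains the handler for writes to /proc/slabinfo; making it static (like drain_array() above) only drops the needlessly global symbol. In this version of mm/slab.c a write is expected to be a single line of the form "<cache-name> <limit> <batchcount> <shared>". A hypothetical userspace sketch (the cache name and numbers are placeholders, not recommended tuning values):

#include <stdio.h>

/*
 * Hypothetical userspace sketch, not part of this patch: submit a tuning
 * line to /proc/slabinfo, which slabinfo_write() above parses.
 */
int main(void)
{
	FILE *f = fopen("/proc/slabinfo", "w");

	if (!f) {
		perror("/proc/slabinfo");
		return 1;
	}
	/* "<cache-name> <limit> <batchcount> <shared>" */
	fprintf(f, "dentry 120 60 8\n");
	return fclose(f) ? 1 : 0;
}

The line must fit within MAX_SLABINFO_WRITE bytes, and writing /proc/slabinfo typically requires root.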