Diffstat (limited to 'mm/slab.c')

 mm/slab.c | 40 +++++-----------------------------------
 1 file changed, 5 insertions(+), 35 deletions(-)
@@ -829,12 +829,12 @@ static void init_reap_node(int cpu)
 
 static void next_reap_node(void)
 {
-	int node = __get_cpu_var(slab_reap_node);
+	int node = __this_cpu_read(slab_reap_node);
 
 	node = next_node(node, node_online_map);
 	if (unlikely(node >= MAX_NUMNODES))
 		node = first_node(node_online_map);
-	__get_cpu_var(slab_reap_node) = node;
+	__this_cpu_write(slab_reap_node, node);
 }
 
 #else
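The hunk above converts the address-based __get_cpu_var() lvalue to the this_cpu accessor pair. A minimal userspace model of the difference, assuming nothing beyond C and GCC's __thread (the variable and both macros below are stand-ins for the real per-cpu machinery, not the kernel's definitions):

#include <stdio.h>

/* Stand-in for a DEFINE_PER_CPU(int, slab_reap_node) slot. */
static __thread int slab_reap_node;

/* Models of __this_cpu_read()/__this_cpu_write(): the caller names the
 * variable and the accessor resolves the per-CPU location internally,
 * instead of computing a pointer and then dereferencing it. */
#define this_cpu_read(var)		(var)
#define this_cpu_write(var, val)	((var) = (val))

int main(void)
{
	this_cpu_write(slab_reap_node, 3);
	printf("reap node: %d\n", this_cpu_read(slab_reap_node));
	return 0;
}

On x86 the real accessors can compile to a single segment-prefixed instruction, which is why the read/write pair is preferred over taking the per-cpu variable's address.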
@@ -1012,7 +1012,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
  */
 static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
 {
-	int node = __get_cpu_var(slab_reap_node);
+	int node = __this_cpu_read(slab_reap_node);
 
 	if (l3->alien) {
 		struct array_cache *ac = l3->alien[node];
@@ -1293,7 +1293,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 		 * anything expensive but will only modify reap_work
 		 * and reschedule the timer.
 		 */
-		cancel_rearming_delayed_work(&per_cpu(slab_reap_work, cpu));
+		cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
 		/* Now the cache_reaper is guaranteed to be not running. */
 		per_cpu(slab_reap_work, cpu).work.func = NULL;
 		break;
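cancel_delayed_work_sync() is the surviving name for what cancel_rearming_delayed_work() did, and it provides the guarantee the comment in the hunk states: on return the callback is not running, and a self-rearming work cannot requeue itself during cancellation. A hypothetical sketch of that pattern, mirroring how cache_reap() reschedules itself (example_work and example_fn are illustrative names, not from this patch):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void example_fn(struct work_struct *w);
static DECLARE_DELAYED_WORK(example_work, example_fn);

/* Self-rearming work, like cache_reap(). */
static void example_fn(struct work_struct *w)
{
	schedule_delayed_work(&example_work, HZ);
}

static void example_stop(void)
{
	/* Cancels the pending timer and waits for a running callback;
	 * after this returns, example_fn is not executing and cannot
	 * have rearmed itself behind our back. */
	cancel_delayed_work_sync(&example_work);
}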
@@ -2781,7 +2781,7 @@ static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
 /*
  * Map pages beginning at addr to the given cache and slab. This is required
  * for the slab allocator to be able to lookup the cache and slab of a
- * virtual address for kfree, ksize, kmem_ptr_validate, and slab debugging.
+ * virtual address for kfree, ksize, and slab debugging.
  */
 static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
 			   void *addr)
@@ -3667,36 +3667,6 @@ kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
 EXPORT_SYMBOL(kmem_cache_alloc_trace);
 #endif
 
-/**
- * kmem_ptr_validate - check if an untrusted pointer might be a slab entry.
- * @cachep: the cache we're checking against
- * @ptr: pointer to validate
- *
- * This verifies that the untrusted pointer looks sane;
- * it is _not_ a guarantee that the pointer is actually
- * part of the slab cache in question, but it at least
- * validates that the pointer can be dereferenced and
- * looks half-way sane.
- *
- * Currently only used for dentry validation.
- */
-int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr)
-{
-	unsigned long size = cachep->buffer_size;
-	struct page *page;
-
-	if (unlikely(!kern_ptr_validate(ptr, size)))
-		goto out;
-	page = virt_to_page(ptr);
-	if (unlikely(!PageSlab(page)))
-		goto out;
-	if (unlikely(page_get_cache(page) != cachep))
-		goto out;
-	return 1;
-out:
-	return 0;
-}
-
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
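For reference, the removed kmem_ptr_validate() was only ever a heuristic: as its own comment said, a positive result was not a guarantee of membership, and nothing prevented the underlying slab page from being freed and reused between the check and the caller's use. Where a caller genuinely must vet an untrusted pointer, the safer pattern is lookup in a structure it already trusts rather than dereference. A hedged sketch of that idea with illustrative names (ptr_is_live_entry is not a kernel function; the caller is assumed to hold whatever lock protects the list):

#include <linux/types.h>
#include <linux/list.h>

/* Validate an untrusted pointer by searching a trusted list, comparing
 * addresses without ever dereferencing the candidate itself. */
static bool ptr_is_live_entry(struct list_head *trusted, const void *untrusted)
{
	struct list_head *pos;

	list_for_each(pos, trusted)
		if ((const void *)pos == untrusted)
			return true;	/* matched a live entry */
	return false;			/* unknown pointer: reject */
}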