author     Nick Piggin <npiggin@kernel.dk>	2011-01-07 01:49:17 -0500
committer  Nick Piggin <npiggin@kernel.dk>	2011-01-07 01:50:16 -0500
commit     ccd35fb9f4da856b105ea0f1e0cab3702e8ae6ba (patch)
tree       acb71aa4ae7d1f1ed17bdd79033a6bad5e27186d
parent     786a5e15b613a9cee4fc9139fc3113a5ab0fde79 (diff)
kernel: kmem_ptr_validate considered harmful
This is a nasty and error-prone API. It is no longer used; remove it.
Signed-off-by: Nick Piggin <npiggin@kernel.dk>
-rw-r--r--	include/linux/slab.h |  2
-rw-r--r--	mm/slab.c            | 32
-rw-r--r--	mm/slob.c            |  5
-rw-r--r--	mm/slub.c            | 40
-rw-r--r--	mm/util.c            | 21
5 files changed, 1 insertion(+), 99 deletions(-)
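Why "considered harmful": kmem_ptr_validate() could only inspect a pointer at a single instant and had no way to pin the object, so its answer was stale the moment it returned. A minimal sketch of the racy caller pattern the API invited (the lookup_cached() helper and obj_cache are hypothetical, purely for illustration):

void *lookup_cached(struct kmem_cache *obj_cache, void *candidate)
{
	/* "Validate" the untrusted pointer against the cache. */
	if (!kmem_ptr_validate(obj_cache, candidate))
		return NULL;

	/*
	 * RACE: nothing here holds a reference or lock, so candidate
	 * may be kmem_cache_free()d -- and its slab page reused --
	 * between the check above and any dereference by the caller.
	 * The validation proved nothing durable, which is what made
	 * the API error-prone.
	 */
	return candidate;
}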
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 59260e21bdf5..fa9086647eb7 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -106,8 +106,6 @@ int kmem_cache_shrink(struct kmem_cache *);
 void kmem_cache_free(struct kmem_cache *, void *);
 unsigned int kmem_cache_size(struct kmem_cache *);
 const char *kmem_cache_name(struct kmem_cache *);
-int kern_ptr_validate(const void *ptr, unsigned long size);
-int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);
 
 /*
  * Please use this macro to create slab caches. Simply specify the
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2781,7 +2781,7 @@ static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
 /*
  * Map pages beginning at addr to the given cache and slab. This is required
  * for the slab allocator to be able to lookup the cache and slab of a
- * virtual address for kfree, ksize, kmem_ptr_validate, and slab debugging.
+ * virtual address for kfree, ksize, and slab debugging.
  */
 static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
 			   void *addr)
@@ -3660,36 +3660,6 @@ void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
 EXPORT_SYMBOL(kmem_cache_alloc_notrace);
 #endif
 
-/**
- * kmem_ptr_validate - check if an untrusted pointer might be a slab entry.
- * @cachep: the cache we're checking against
- * @ptr: pointer to validate
- *
- * This verifies that the untrusted pointer looks sane;
- * it is _not_ a guarantee that the pointer is actually
- * part of the slab cache in question, but it at least
- * validates that the pointer can be dereferenced and
- * looks half-way sane.
- *
- * Currently only used for dentry validation.
- */
-int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr)
-{
-	unsigned long size = cachep->buffer_size;
-	struct page *page;
-
-	if (unlikely(!kern_ptr_validate(ptr, size)))
-		goto out;
-	page = virt_to_page(ptr);
-	if (unlikely(!PageSlab(page)))
-		goto out;
-	if (unlikely(page_get_cache(page) != cachep))
-		goto out;
-	return 1;
-out:
-	return 0;
-}
-
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
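The removed docstring notes the sole remaining user was dentry validation. The parent commit (786a5e15b613) had already reworked d_validate() in fs/dcache.c to stop probing the raw pointer and instead confirm the dentry by walking its alleged parent's list of children under the dcache lock, taking a reference while the entry is provably live. A rough sketch of that safer shape (abridged from memory, not the verbatim kernel source):

int d_validate(struct dentry *dentry, struct dentry *dparent)
{
	struct dentry *child;

	spin_lock(&dcache_lock);
	list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) {
		if (child == dentry) {
			/* Pin the dentry while it is known to be valid. */
			__dget_locked(dentry);
			spin_unlock(&dcache_lock);
			return 1;
		}
	}
	spin_unlock(&dcache_lock);
	return 0;
}

The key difference from kmem_ptr_validate(): the answer is computed while holding the lock that keeps it true, and the object is pinned before the lock is dropped.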
diff --git a/mm/slob.c b/mm/slob.c
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -678,11 +678,6 @@ int kmem_cache_shrink(struct kmem_cache *d)
 }
 EXPORT_SYMBOL(kmem_cache_shrink);
 
-int kmem_ptr_validate(struct kmem_cache *a, const void *b)
-{
-	return 0;
-}
-
 static unsigned int slob_ready __read_mostly;
 
 int slab_is_available(void)
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1917,17 +1917,6 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
-/* Figure out on which slab page the object resides */
-static struct page *get_object_page(const void *x)
-{
-	struct page *page = virt_to_head_page(x);
-
-	if (!PageSlab(page))
-		return NULL;
-
-	return page;
-}
-
 /*
  * Object placement in a slab is made very easy because we always start at
  * offset 0. If we tune the size of the object to the alignment then we can
@@ -2386,35 +2375,6 @@ error:
 }
 
 /*
- * Check if a given pointer is valid
- */
-int kmem_ptr_validate(struct kmem_cache *s, const void *object)
-{
-	struct page *page;
-
-	if (!kern_ptr_validate(object, s->size))
-		return 0;
-
-	page = get_object_page(object);
-
-	if (!page || s != page->slab)
-		/* No slab or wrong slab */
-		return 0;
-
-	if (!check_valid_pointer(s, page, object))
-		return 0;
-
-	/*
-	 * We could also check if the object is on the slabs freelist.
-	 * But this would be too expensive and it seems that the main
-	 * purpose of kmem_ptr_valid() is to check if the object belongs
-	 * to a certain slab.
-	 */
-	return 1;
-}
-EXPORT_SYMBOL(kmem_ptr_validate);
-
-/*
  * Determine the size of a slab object
  */
 unsigned int kmem_cache_size(struct kmem_cache *s)
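The removed SLUB comment records a deliberate trade-off: proving the object live (not merely resident in the right slab) would mean scanning the slab's freelist, which is linear in objects-per-slab and still racy without extra synchronization. A sketch of the check the comment rules out (hypothetical code, never part of the kernel; assumes the era's page->freelist chain and SLUB's internal get_freepointer() helper):

/* Walk the slab page's freelist looking for 'object'.  O(objects
 * per slab), and the list can mutate underneath us, so a hit or a
 * miss is immediately stale -- the same fundamental flaw as
 * kmem_ptr_validate() itself, at a higher price. */
static int object_on_freelist(struct kmem_cache *s, struct page *page,
			      const void *object)
{
	void *p;

	for (p = page->freelist; p; p = get_freepointer(s, p))
		if (p == object)
			return 1;
	return 0;
}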
diff --git a/mm/util.c b/mm/util.c
--- a/mm/util.c
+++ b/mm/util.c
@@ -186,27 +186,6 @@ void kzfree(const void *p)
 }
 EXPORT_SYMBOL(kzfree);
 
-int kern_ptr_validate(const void *ptr, unsigned long size)
-{
-	unsigned long addr = (unsigned long)ptr;
-	unsigned long min_addr = PAGE_OFFSET;
-	unsigned long align_mask = sizeof(void *) - 1;
-
-	if (unlikely(addr < min_addr))
-		goto out;
-	if (unlikely(addr > (unsigned long)high_memory - size))
-		goto out;
-	if (unlikely(addr & align_mask))
-		goto out;
-	if (unlikely(!kern_addr_valid(addr)))
-		goto out;
-	if (unlikely(!kern_addr_valid(addr + size - 1)))
-		goto out;
-	return 1;
-out:
-	return 0;
-}
-
 /*
  * strndup_user - duplicate an existing string from user space
  * @s: The string to duplicate