author		Pekka Enberg <penberg@cs.helsinki.fi>	2006-06-23 05:03:40 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-06-23 10:42:51 -0400
commit		ddc2e812d592457747c4367fb73edcaa8e1e49ff
tree		7dc9066c1e43d14f7c495847a9f0b24287806fb2
parent		8d3c138b77f195ca0eee6fb639ae73f5ea9edb6b
[PATCH] slab: verify pointers before free
Passing an invalid pointer to kfree() or kmem_cache_free() is likely to
cause bad memory corruption or even take down the whole system, because the
bad pointer is likely to be reused immediately due to the per-CPU caches.
Until now, we have not done any verification for this when
CONFIG_DEBUG_SLAB is disabled.
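
To make the per-CPU reuse hazard concrete, here is a minimal hypothetical
sketch (not part of the patch; the function and buffer names are made up):

	#include <linux/slab.h>

	/*
	 * Hypothetical example: kfree() on a pointer that was never
	 * returned by the slab allocator.  Without verification the
	 * bogus address is pushed onto the per-CPU array cache and
	 * handed straight back by a later kmalloc(), so whatever it
	 * points at gets silently overwritten.  With this patch,
	 * page_get_cache() hits BUG_ON(!PageSlab(page)) instead.
	 */
	static char not_a_slab_object[32];

	static void invalid_free_example(void)
	{
		kfree(not_a_slab_object);	/* bad: not a kmalloc() object */
	}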
As suggested by Linus, add a PageSlab() check to page_get_cache() and
page_get_slab() to verify pointers passed to kfree(). Also, move the
stronger check from cache_free_debugcheck() to kmem_cache_free() to ensure
that the passed pointer actually belongs to the cache from which we are
about to free the object.
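
A hypothetical sketch of that second case (cache_a and cache_b are made-up
caches, assumed to have been created elsewhere with kmem_cache_create()):

	#include <linux/slab.h>

	extern struct kmem_cache *cache_a, *cache_b;

	static void wrong_cache_free_example(void)
	{
		void *obj = kmem_cache_alloc(cache_a, GFP_KERNEL);

		/*
		 * Previously only diagnosed via printk()/WARN_ON() under
		 * CONFIG_DEBUG_SLAB; now BUG_ON(virt_to_cache(obj) != cache_b)
		 * fires here instead of corrupting cache_b's free lists.
		 */
		kmem_cache_free(cache_b, obj);
	}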
For page_get_cache() and page_get_slab(), the assertions should have
virtually no extra cost (two instructions, no data cache pressure), and for
kmem_cache_free() the overhead should be minimal.
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Manfred Spraul <manfred@colorfullife.com>
Cc: Christoph Lameter <clameter@engr.sgi.com>
Cc: Linus Torvalds <torvalds@osdl.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
 mm/slab.c | 13 ++++---------
 1 file changed, 4 insertions(+), 9 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
@@ -592,6 +592,7 @@ static inline struct kmem_cache *page_get_cache(struct page *page)
 {
 	if (unlikely(PageCompound(page)))
 		page = (struct page *)page_private(page);
+	BUG_ON(!PageSlab(page));
 	return (struct kmem_cache *)page->lru.next;
 }
 
@@ -604,6 +605,7 @@ static inline struct slab *page_get_slab(struct page *page)
 {
 	if (unlikely(PageCompound(page)))
 		page = (struct page *)page_private(page);
+	BUG_ON(!PageSlab(page));
 	return (struct slab *)page->lru.prev;
 }
 
@@ -2669,15 +2671,6 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 	kfree_debugcheck(objp);
 	page = virt_to_page(objp);
 
-	if (page_get_cache(page) != cachep) {
-		printk(KERN_ERR "mismatch in kmem_cache_free: expected "
-				"cache %p, got %p\n",
-		       page_get_cache(page), cachep);
-		printk(KERN_ERR "%p is %s.\n", cachep, cachep->name);
-		printk(KERN_ERR "%p is %s.\n", page_get_cache(page),
-		       page_get_cache(page)->name);
-		WARN_ON(1);
-	}
 	slabp = page_get_slab(page);
 
 	if (cachep->flags & SLAB_RED_ZONE) {
@@ -3393,6 +3386,8 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 {
 	unsigned long flags;
 
+	BUG_ON(virt_to_cache(objp) != cachep);
+
 	local_irq_save(flags);
 	__cache_free(cachep, objp);
 	local_irq_restore(flags);
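
For reference, the virt_to_cache() used by the new check is, at this point
in mm/slab.c, essentially the composition sketched below (modulo exact
details), so the kmem_cache_free() assertion also passes through the
PageSlab() check added above:

	static inline struct kmem_cache *virt_to_cache(const void *obj)
	{
		struct page *page = virt_to_page(obj);

		return page_get_cache(page);
	}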