path: root/mm/slub.c
author    Christoph Lameter <cl@linux.com>    2010-08-20 13:37:16 -0400
committer Pekka Enberg <penberg@kernel.org>   2010-10-02 03:24:27 -0400
commit    c016b0bdeee74a7fbe5179937c0d667eabcf379e (patch)
tree      b1544516251bbf25de94ad67f353b7986647bbe5 /mm/slub.c
parent    51df1142816e469173889fb6d6dc810be9b9e022 (diff)
slub: Extract hooks for memory checkers from hotpaths
Extract the code that memory checkers and other verification tools use from the hotpaths. Makes it easier to add new ones and reduces the disturbances of the hotpaths.

Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
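As a rough illustration of the pattern this patch applies, here is a minimal user-space C sketch (not kernel code; every name below, such as TOY_DEBUG and toy_alloc(), is invented for this example): the checker calls are gathered into static inline hooks, so each hot path contains exactly one hook call, and when the checkers are configured out the hooks are empty and the compiler emits no code for them.

/*
 * Hypothetical sketch of the hook-extraction pattern.  Build with
 * -DTOY_DEBUG to enable the "checkers"; without it the hooks compile
 * to nothing, leaving the hot paths undisturbed.
 */
#include <stdio.h>
#include <stdlib.h>

#ifdef TOY_DEBUG
static inline void toy_pre_alloc_hook(size_t size)
{
	fprintf(stderr, "checker: about to allocate %zu bytes\n", size);
}

static inline void toy_post_alloc_hook(void *object, size_t size)
{
	fprintf(stderr, "checker: allocated %p (%zu bytes)\n", object, size);
}

static inline void toy_free_hook(void *object)
{
	fprintf(stderr, "checker: freeing %p\n", object);
}
#else
/* Production configuration: the hooks produce no code at all. */
static inline void toy_pre_alloc_hook(size_t size) { (void)size; }
static inline void toy_post_alloc_hook(void *object, size_t size)
{
	(void)object;
	(void)size;
}
static inline void toy_free_hook(void *object) { (void)object; }
#endif

/* Hot paths: one hook call per location, however many checkers exist. */
static void *toy_alloc(size_t size)
{
	void *object;

	toy_pre_alloc_hook(size);
	object = malloc(size);
	toy_post_alloc_hook(object, size);
	return object;
}

static void toy_free(void *object)
{
	toy_free_hook(object);
	free(object);
}

int main(void)
{
	void *p = toy_alloc(64);

	toy_free(p);
	return 0;
}

Adding a new checker then means adding one call inside the relevant hook rather than editing toy_alloc() and toy_free() themselves, which mirrors the "easier to add new ones" point in the commit message.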
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	49
1 file changed, 38 insertions(+), 11 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 94fee96da0d2..ca49d02b5ff8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -791,6 +791,37 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
 }
 
 /*
+ * Hooks for other subsystems that check memory allocations. In a typical
+ * production configuration these hooks all should produce no code at all.
+ */
+static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
+{
+	lockdep_trace_alloc(flags);
+	might_sleep_if(flags & __GFP_WAIT);
+
+	return should_failslab(s->objsize, flags, s->flags);
+}
+
+static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
+{
+	kmemcheck_slab_alloc(s, flags, object, s->objsize);
+	kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags);
+}
+
+static inline void slab_free_hook(struct kmem_cache *s, void *x)
+{
+	kmemleak_free_recursive(x, s->flags);
+}
+
+static inline void slab_free_hook_irq(struct kmem_cache *s, void *object)
+{
+	kmemcheck_slab_free(s, object, s->objsize);
+	debug_check_no_locks_freed(object, s->objsize);
+	if (!(s->flags & SLAB_DEBUG_OBJECTS))
+		debug_check_no_obj_freed(object, s->objsize);
+}
+
+/*
  * Tracking of fully allocated slabs for debugging purposes.
  */
 static void add_full(struct kmem_cache_node *n, struct page *page)
@@ -1696,10 +1727,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 	gfpflags &= gfp_allowed_mask;
 
-	lockdep_trace_alloc(gfpflags);
-	might_sleep_if(gfpflags & __GFP_WAIT);
-
-	if (should_failslab(s->objsize, gfpflags, s->flags))
+	if (slab_pre_alloc_hook(s, gfpflags))
 		return NULL;
 
 	local_irq_save(flags);
@@ -1718,8 +1746,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	if (unlikely(gfpflags & __GFP_ZERO) && object)
 		memset(object, 0, s->objsize);
 
-	kmemcheck_slab_alloc(s, gfpflags, object, s->objsize);
-	kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, gfpflags);
+	slab_post_alloc_hook(s, gfpflags, object);
 
 	return object;
 }
@@ -1849,13 +1876,13 @@ static __always_inline void slab_free(struct kmem_cache *s,
 	struct kmem_cache_cpu *c;
 	unsigned long flags;
 
-	kmemleak_free_recursive(x, s->flags);
+	slab_free_hook(s, x);
+
 	local_irq_save(flags);
 	c = __this_cpu_ptr(s->cpu_slab);
-	kmemcheck_slab_free(s, object, s->objsize);
-	debug_check_no_locks_freed(object, s->objsize);
-	if (!(s->flags & SLAB_DEBUG_OBJECTS))
-		debug_check_no_obj_freed(object, s->objsize);
+
+	slab_free_hook_irq(s, x);
+
 	if (likely(page == c->page && c->node >= 0)) {
 		set_freepointer(s, object, c->freelist);
 		c->freelist = object;