aboutsummaryrefslogtreecommitdiffstats
path: root/mm/slub.c
diff options
context:
space:
mode:
authorChristoph Lameter <cl@linux.com>2011-02-25 12:38:52 -0500
committerPekka Enberg <penberg@kernel.org>2011-03-11 10:42:49 -0500
commitd3f661d69a486db0e0e6343b452f45d91b4b3656 (patch)
tree4b4c882c8690b52b922214f5864d9a678c0c2a06 /mm/slub.c
parent1a757fe5d4234293d6a3acccd7196f1386443956 (diff)
slub: Get rid of slab_free_hook_irq()
The following patch will make the fastpaths lockless and will no longer require interrupts to be disabled. Calling the free hook with irq disabled will no longer be possible. Move the slab_free_hook_irq() logic into slab_free_hook. Only disable interrupts if the features are selected that require callbacks with interrupts off and reenable after calls have been made. Signed-off-by: Christoph Lameter <cl@linux.com> Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'mm/slub.c')
-rw-r--r--mm/slub.c29
1 files changed, 17 insertions, 12 deletions
diff --git a/mm/slub.c b/mm/slub.c
index e15aa7f193c9..bae7a5c636f4 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -807,14 +807,24 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void
 static inline void slab_free_hook(struct kmem_cache *s, void *x)
 {
 	kmemleak_free_recursive(x, s->flags);
-}
 
-static inline void slab_free_hook_irq(struct kmem_cache *s, void *object)
-{
-	kmemcheck_slab_free(s, object, s->objsize);
-	debug_check_no_locks_freed(object, s->objsize);
-	if (!(s->flags & SLAB_DEBUG_OBJECTS))
-		debug_check_no_obj_freed(object, s->objsize);
+	/*
+	 * Trouble is that we may no longer disable interupts in the fast path
+	 * So in order to make the debug calls that expect irqs to be
+	 * disabled we need to disable interrupts temporarily.
+	 */
+#if defined(CONFIG_KMEMCHECK) || defined(CONFIG_LOCKDEP)
+	{
+		unsigned long flags;
+
+		local_irq_save(flags);
+		kmemcheck_slab_free(s, x, s->objsize);
+		debug_check_no_locks_freed(x, s->objsize);
+		if (!(s->flags & SLAB_DEBUG_OBJECTS))
+			debug_check_no_obj_freed(x, s->objsize);
+		local_irq_restore(flags);
+	}
+#endif
 }
 
 /*
@@ -1101,9 +1111,6 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
 
 static inline void slab_free_hook(struct kmem_cache *s, void *x) {}
 
-static inline void slab_free_hook_irq(struct kmem_cache *s,
-							void *object) {}
-
 #endif /* CONFIG_SLUB_DEBUG */
 
 /*
@@ -1909,8 +1916,6 @@ static __always_inline void slab_free(struct kmem_cache *s,
 	local_irq_save(flags);
 	c = __this_cpu_ptr(s->cpu_slab);
 
-	slab_free_hook_irq(s, x);
-
 	if (likely(page == c->page && c->node != NUMA_NO_NODE)) {
 		set_freepointer(s, object, c->freelist);
 		c->freelist = object;