author     Christoph Lameter <cl@linux.com>	2010-08-20 13:37:17 -0400
committer  Pekka Enberg <penberg@kernel.org>	2010-10-02 03:24:27 -0400
commit     c1d508365ea07f9f8849c7da5b02186601570f8b (patch)
tree       a087b2cda4546c51ef7ab4ba3c7764129a63e555 /mm/slub.c
parent     c016b0bdeee74a7fbe5179937c0d667eabcf379e (diff)
slub: Move gfpflag masking out of the hotpath
Move the gfpflags masking into the hooks for checkers and into the
slowpaths. gfpflags masking requires access to a global variable and
thus adds an additional cacheline reference to the hotpaths.

If no hooks are active then the gfpflags masking will result in code
that the compiler can toss out.

Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
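The payoff is easiest to see outside the kernel tree. Below is a minimal,
self-contained C sketch of the shape of the change; the names
(slab_pre_alloc_hook_sketch, CONFIG_DEBUG_HOOKS, slab_alloc_sketch) are
illustrative stand-ins, not the real kernel symbols. When the debug hooks
compile to empty inline functions, the mask against the global, and with
it the extra cacheline reference, disappears from the fast path:

#include <stddef.h>

typedef unsigned int gfp_t;

/* Global mask: reading it from the fast path is what cost the
 * extra cacheline before this patch. */
gfp_t gfp_allowed_mask = 0x01ffffffu;

/* Debug hook: masking happens here, where the flags are actually
 * consumed.  With CONFIG_DEBUG_HOOKS undefined the body is empty
 * and the compiler drops the gfp_allowed_mask reference entirely. */
static inline int slab_pre_alloc_hook_sketch(gfp_t flags)
{
#ifdef CONFIG_DEBUG_HOOKS
	flags &= gfp_allowed_mask;
	/* ... lockdep/kmemcheck-style checks on 'flags' ... */
#else
	(void)flags;
#endif
	return 0;
}

/* Fast path after the patch: no gfp_allowed_mask access here. */
void *slab_alloc_sketch(gfp_t gfpflags)
{
	if (slab_pre_alloc_hook_sketch(gfpflags))
		return NULL;
	/* ... per-CPU freelist fast path; the slow path re-masks
	 * gfpflags itself before calling the page allocator ... */
	return NULL;
}

int main(void)
{
	return slab_alloc_sketch(0x10u) == NULL ? 0 : 1;
}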
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index ca49d02b5ff8..6608f2bc310b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -796,6 +796,7 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
  */
 static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
 {
+	flags &= gfp_allowed_mask;
 	lockdep_trace_alloc(flags);
 	might_sleep_if(flags & __GFP_WAIT);
 
@@ -804,6 +805,7 @@ static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
 
 static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
 {
+	flags &= gfp_allowed_mask;
 	kmemcheck_slab_alloc(s, flags, object, s->objsize);
 	kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags);
 }
@@ -1677,6 +1679,7 @@ new_slab:
 		goto load_freelist;
 	}
 
+	gfpflags &= gfp_allowed_mask;
 	if (gfpflags & __GFP_WAIT)
 		local_irq_enable();
 
@@ -1725,8 +1728,6 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	struct kmem_cache_cpu *c;
 	unsigned long flags;
 
-	gfpflags &= gfp_allowed_mask;
-
 	if (slab_pre_alloc_hook(s, gfpflags))
 		return NULL;
 
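Taken together the four hunks remove the unconditional global read from
slab_alloc()'s fast path: each consumer of the flags now masks for itself,
namely the pre/post hooks and the __slab_alloc() slow path just before it
may enable interrupts and call out to the page allocator. With hooks
enabled the mask can be applied more than once on a slow-path allocation,
but ANDing with the same mask is idempotent, so the redundancy is harmless.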