author     Mel Gorman <mgorman@suse.de>                    2012-07-31 19:44:30 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-07-31 21:42:46 -0400
commit     381760eadc393bcb1bb328510ad75cf13431806d
tree       4db4e80b82ea78e1b486ebde7feb835a2de5c2ef /mm
parent     b4b9e3558508980fc0cd161a545ffb55a1f13ee9
mm: micro-optimise slab to avoid a function call
Getting and putting objects in SLAB currently requires a function call, but the bulk of the work is related to PFMEMALLOC reserves, which are only consumed when network-backed storage is critical. Use an inline function to determine whether the function call is required.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Cc: David Miller <davem@davemloft.net>
Cc: Neil Brown <neilb@suse.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Christie <michaelc@cs.wisc.edu>
Cc: Eric B Munson <emunson@mgebm.net>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Christoph Lameter <cl@linux.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
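For illustration only, below is a minimal user-space sketch of the fast-path/slow-path split this patch introduces: an inline wrapper serves the common case with a single array access and only pays the out-of-line function call when a rarely-true predicate holds. The names here (obj_cache, cache_get, cache_get_slow, rare_condition) are hypothetical stand-ins for the kernel's array_cache, ac_get_obj(), __ac_get_obj() and sk_memalloc_socks(); this is not the kernel implementation.

/*
 * Sketch of the pattern: keep the common case inline, call out of line
 * only when a rarely-true condition is active.
 */
#include <stdio.h>
#include <stdbool.h>

struct obj_cache {
	void **entry;		/* cached object pointers */
	int avail;		/* number of objects currently cached */
};

static bool rare_condition;	/* stand-in for sk_memalloc_socks() */

/* Out-of-line slow path, reached only when the rare condition is active.
 * In the kernel this is where the PFMEMALLOC handling would live. */
static void *cache_get_slow(struct obj_cache *ac)
{
	return ac->entry[--ac->avail];
}

/* Inline fast path: one predictable branch plus an array access. */
static inline void *cache_get(struct obj_cache *ac)
{
	if (__builtin_expect(rare_condition, 0))	/* i.e. unlikely() */
		return cache_get_slow(ac);
	return ac->entry[--ac->avail];
}

int main(void)
{
	int a = 1, b = 2;
	void *objs[] = { &a, &b };
	struct obj_cache ac = { .entry = objs, .avail = 2 };

	void *first = cache_get(&ac);
	void *second = cache_get(&ac);
	printf("got %p then %p\n", first, second);
	return 0;
}

The patch applies the same split symmetrically to the put path, as the diff below shows.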
Diffstat (limited to 'mm')
-rw-r--r--  mm/slab.c | 28
1 file changed, 26 insertions(+), 2 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 77be18dab73c..f8b0d539b482 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -118,6 +118,8 @@
 #include <linux/memory.h>
 #include <linux/prefetch.h>
 
+#include <net/sock.h>
+
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/page.h>
@@ -965,7 +967,7 @@ out:
 	spin_unlock_irqrestore(&l3->list_lock, flags);
 }
 
-static void *ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
+static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
 						gfp_t flags, bool force_refill)
 {
 	int i;
@@ -1012,7 +1014,20 @@ static void *ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
 	return objp;
 }
 
-static void ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
+static inline void *ac_get_obj(struct kmem_cache *cachep,
+			struct array_cache *ac, gfp_t flags, bool force_refill)
+{
+	void *objp;
+
+	if (unlikely(sk_memalloc_socks()))
+		objp = __ac_get_obj(cachep, ac, flags, force_refill);
+	else
+		objp = ac->entry[--ac->avail];
+
+	return objp;
+}
+
+static void *__ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
 								void *objp)
 {
 	if (unlikely(pfmemalloc_active)) {
@@ -1022,6 +1037,15 @@ static void ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
 		set_obj_pfmemalloc(&objp);
 	}
 
+	return objp;
+}
+
+static inline void ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
+								void *objp)
+{
+	if (unlikely(sk_memalloc_socks()))
+		objp = __ac_put_obj(cachep, ac, objp);
+
 	ac->entry[ac->avail++] = objp;
 }
 