author		Joonsoo Kim <iamjoonsoo.kim@lge.com>	2014-08-06 19:04:29 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-06 21:01:14 -0400
commit		c8522a3a5832b843570a3315674f5a3575958a51 (patch)
tree		f9feb05f0a13898ca60750286000de02a6575300 /mm
parent		1fe00d50a9e81150de5000490b87ed227525cf09 (diff)
slab: introduce alien_cache
Currently, we use array_cache for the alien cache. Although the two are mostly similar, there is one difference: the need for a spinlock. array_cache itself does not need a spinlock, but to serve as an alien cache the array_cache structure has to carry one, which is needless overhead for every other array_cache user. Removing it would therefore be better. This patch prepares for that by introducing a dedicated alien_cache structure, which embeds an array_cache alongside the spinlock, and converting the alien-cache code to use it. The following patch removes the spinlock from array_cache.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
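[Editor's illustration, not part of the commit: a minimal, hypothetical user-space sketch of the layout the patch introduces. pthread_mutex_t stands in for the kernel's spinlock_t, and alloc_alien() is a simplified analogue of __alloc_alien_cache() plus init_arraycache(); the fields are trimmed to what the sketch uses.]

/*
 * Hypothetical user-space sketch of the alien_cache layout.
 * Embedding a struct whose last member is a flexible array is a GNU
 * extension; it is safe here because 'ac' is the last member, which
 * mirrors the kernel's own layout.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct array_cache {
	unsigned int avail;		/* objects currently cached */
	unsigned int limit;		/* capacity of entry[] */
	unsigned int batchcount;
	void *entry[];			/* storage allocated past the struct */
};

/* Only alien (remote-node) caches need a lock, so the lock lives in a
 * wrapper rather than in every array_cache. */
struct alien_cache {
	pthread_mutex_t lock;		/* spinlock_t in the kernel */
	struct array_cache ac;
};

static struct alien_cache *alloc_alien(unsigned int entries)
{
	/* Mirrors the patch's memsize computation: one allocation covers
	 * the wrapper plus the trailing entry[] array. */
	struct alien_cache *alc =
		malloc(sizeof(struct alien_cache) + sizeof(void *) * entries);

	if (!alc)
		return NULL;
	pthread_mutex_init(&alc->lock, NULL);
	alc->ac.avail = 0;
	alc->ac.limit = entries;
	alc->ac.batchcount = 1;
	return alc;
}

int main(void)
{
	struct alien_cache *alc = alloc_alien(12);

	if (!alc)
		return 1;
	/* Callers take the wrapper's lock, then operate on the embedded
	 * array_cache, as cache_free_alien() now does via &alien->ac. */
	pthread_mutex_lock(&alc->lock);
	alc->ac.entry[alc->ac.avail++] = (void *)alc;	/* stash an object */
	printf("limit=%u avail=%u\n", alc->ac.limit, alc->ac.avail);
	pthread_mutex_unlock(&alc->lock);
	free(alc);
	return 0;
}

[The point of the layout is that the lock lives only in the wrapper: once every alien user goes through struct alien_cache, the follow-up patch can drop the spinlock from array_cache without penalizing the per-cpu caches that never needed it.]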
Diffstat (limited to 'mm')
-rw-r--r--	mm/slab.c	108
-rw-r--r--	mm/slab.h	2
2 files changed, 68 insertions(+), 42 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 8d9a0fff160d..de91d6f3a2a4 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -203,6 +203,11 @@ struct array_cache {
 	 */
 };
 
+struct alien_cache {
+	spinlock_t lock;
+	struct array_cache ac;
+};
+
 #define SLAB_OBJ_PFMEMALLOC	1
 static inline bool is_obj_pfmemalloc(void *objp)
 {
@@ -491,7 +496,7 @@ static void slab_set_lock_classes(struct kmem_cache *cachep,
 		struct lock_class_key *l3_key, struct lock_class_key *alc_key,
 		struct kmem_cache_node *n)
 {
-	struct array_cache **alc;
+	struct alien_cache **alc;
 	int r;
 
 	lockdep_set_class(&n->list_lock, l3_key);
@@ -507,7 +512,7 @@ static void slab_set_lock_classes(struct kmem_cache *cachep,
 		return;
 	for_each_node(r) {
 		if (alc[r])
-			lockdep_set_class(&alc[r]->lock, alc_key);
+			lockdep_set_class(&(alc[r]->ac.lock), alc_key);
 	}
 }
 
@@ -965,12 +970,13 @@ static int transfer_objects(struct array_cache *to,
 #define drain_alien_cache(cachep, alien) do { } while (0)
 #define reap_alien(cachep, n) do { } while (0)
 
-static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
+static inline struct alien_cache **alloc_alien_cache(int node,
+						int limit, gfp_t gfp)
 {
-	return (struct array_cache **)BAD_ALIEN_MAGIC;
+	return (struct alien_cache **)BAD_ALIEN_MAGIC;
 }
 
-static inline void free_alien_cache(struct array_cache **ac_ptr)
+static inline void free_alien_cache(struct alien_cache **ac_ptr)
 {
 }
 
@@ -996,40 +1002,52 @@ static inline void *____cache_alloc_node(struct kmem_cache *cachep,
 static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
 static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
 
-static struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
+static struct alien_cache *__alloc_alien_cache(int node, int entries,
+						int batch, gfp_t gfp)
+{
+	int memsize = sizeof(void *) * entries + sizeof(struct alien_cache);
+	struct alien_cache *alc = NULL;
+
+	alc = kmalloc_node(memsize, gfp, node);
+	init_arraycache(&alc->ac, entries, batch);
+	return alc;
+}
+
+static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
 {
-	struct array_cache **ac_ptr;
+	struct alien_cache **alc_ptr;
 	int memsize = sizeof(void *) * nr_node_ids;
 	int i;
 
 	if (limit > 1)
 		limit = 12;
-	ac_ptr = kzalloc_node(memsize, gfp, node);
-	if (ac_ptr) {
-		for_each_node(i) {
-			if (i == node || !node_online(i))
-				continue;
-			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d, gfp);
-			if (!ac_ptr[i]) {
-				for (i--; i >= 0; i--)
-					kfree(ac_ptr[i]);
-				kfree(ac_ptr);
-				return NULL;
-			}
+	alc_ptr = kzalloc_node(memsize, gfp, node);
+	if (!alc_ptr)
+		return NULL;
+
+	for_each_node(i) {
+		if (i == node || !node_online(i))
+			continue;
+		alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp);
+		if (!alc_ptr[i]) {
+			for (i--; i >= 0; i--)
+				kfree(alc_ptr[i]);
+			kfree(alc_ptr);
+			return NULL;
 		}
 	}
-	return ac_ptr;
+	return alc_ptr;
 }
 
-static void free_alien_cache(struct array_cache **ac_ptr)
+static void free_alien_cache(struct alien_cache **alc_ptr)
 {
 	int i;
 
-	if (!ac_ptr)
+	if (!alc_ptr)
 		return;
 	for_each_node(i)
-		kfree(ac_ptr[i]);
-	kfree(ac_ptr);
+		kfree(alc_ptr[i]);
+	kfree(alc_ptr);
 }
 
 static void __drain_alien_cache(struct kmem_cache *cachep,
@@ -1063,25 +1081,31 @@ static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
 	int node = __this_cpu_read(slab_reap_node);
 
 	if (n->alien) {
-		struct array_cache *ac = n->alien[node];
-
-		if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
-			__drain_alien_cache(cachep, ac, node);
-			spin_unlock_irq(&ac->lock);
+		struct alien_cache *alc = n->alien[node];
+		struct array_cache *ac;
+
+		if (alc) {
+			ac = &alc->ac;
+			if (ac->avail && spin_trylock_irq(&ac->lock)) {
+				__drain_alien_cache(cachep, ac, node);
+				spin_unlock_irq(&ac->lock);
+			}
 		}
 	}
 }
 
 static void drain_alien_cache(struct kmem_cache *cachep,
-				struct array_cache **alien)
+				struct alien_cache **alien)
 {
 	int i = 0;
+	struct alien_cache *alc;
 	struct array_cache *ac;
 	unsigned long flags;
 
 	for_each_online_node(i) {
-		ac = alien[i];
-		if (ac) {
+		alc = alien[i];
+		if (alc) {
+			ac = &alc->ac;
 			spin_lock_irqsave(&ac->lock, flags);
 			__drain_alien_cache(cachep, ac, i);
 			spin_unlock_irqrestore(&ac->lock, flags);
@@ -1093,7 +1117,8 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 {
 	int nodeid = page_to_nid(virt_to_page(objp));
 	struct kmem_cache_node *n;
-	struct array_cache *alien = NULL;
+	struct alien_cache *alien = NULL;
+	struct array_cache *ac;
 	int node;
 	LIST_HEAD(list);
 
@@ -1110,13 +1135,14 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 	STATS_INC_NODEFREES(cachep);
 	if (n->alien && n->alien[nodeid]) {
 		alien = n->alien[nodeid];
-		spin_lock(&alien->lock);
-		if (unlikely(alien->avail == alien->limit)) {
+		ac = &alien->ac;
+		spin_lock(&ac->lock);
+		if (unlikely(ac->avail == ac->limit)) {
 			STATS_INC_ACOVERFLOW(cachep);
-			__drain_alien_cache(cachep, alien, nodeid);
+			__drain_alien_cache(cachep, ac, nodeid);
 		}
-		ac_put_obj(cachep, alien, objp);
-		spin_unlock(&alien->lock);
+		ac_put_obj(cachep, ac, objp);
+		spin_unlock(&ac->lock);
 	} else {
 		n = get_node(cachep, nodeid);
 		spin_lock(&n->list_lock);
@@ -1191,7 +1217,7 @@ static void cpuup_canceled(long cpu)
 	list_for_each_entry(cachep, &slab_caches, list) {
 		struct array_cache *nc;
 		struct array_cache *shared;
-		struct array_cache **alien;
+		struct alien_cache **alien;
 		LIST_HEAD(list);
 
 		/* cpu is dead; no one can alloc from it. */
@@ -1272,7 +1298,7 @@ static int cpuup_prepare(long cpu)
 	list_for_each_entry(cachep, &slab_caches, list) {
 		struct array_cache *nc;
 		struct array_cache *shared = NULL;
-		struct array_cache **alien = NULL;
+		struct alien_cache **alien = NULL;
 
 		nc = alloc_arraycache(node, cachep->limit,
 				cachep->batchcount, GFP_KERNEL);
@@ -3762,7 +3788,7 @@ static int alloc_kmem_cache_node(struct kmem_cache *cachep, gfp_t gfp)
 	int node;
 	struct kmem_cache_node *n;
 	struct array_cache *new_shared;
-	struct array_cache **new_alien = NULL;
+	struct alien_cache **new_alien = NULL;
 
 	for_each_online_node(node) {
 
diff --git a/mm/slab.h b/mm/slab.h
index 3822b65edcc2..928823e17e58 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -276,7 +276,7 @@ struct kmem_cache_node {
 	unsigned int free_limit;
 	unsigned int colour_next;	/* Per-node cache coloring */
 	struct array_cache *shared;	/* shared per node */
-	struct array_cache **alien;	/* on other nodes */
+	struct alien_cache **alien;	/* on other nodes */
 	unsigned long next_reap;	/* updated without locking */
 	int free_touched;		/* updated without locking */
 #endif