author    Christoph Lameter <cl@linux.com>  2014-08-06 19:04:09 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-08-06 21:01:13 -0400
commit    fa45dc254bcf740852752effa35387be684947f8 (patch)
tree      5c8c3b9ec26e90fc230bcc615598cc97f4462e66 /mm/slub.c
parent    44c5356fb460053112ab87c9601df1605054edca (diff)
slub: use new node functions
Make use of the new node functions in mm/slab.h to reduce code size and
simplify.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Christoph Lameter <cl@linux.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
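For reference, the iterator this patch switches to lives in mm/slab.h and was added earlier in this series. A minimal sketch of its shape, from memory of the mm/slab.h of this era (consult the header at this commit for the authoritative form):

    /*
     * Iterate over all nodes for which cache s has a kmem_cache_node
     * structure.  The NULL test is built into the iterator itself,
     * which is why the per-callsite "if (!n) continue;" checks below
     * can be dropped.
     */
    #define for_each_kmem_cache_node(__s, __node, __n) \
            for (__node = 0; __node < nr_node_ids; __node++) \
                    if ((__n = get_node(__s, __node)))

Because the iterator skips nodes without a kmem_cache_node, each converted loop loses both the explicit get_node() call and the NULL check. The one loop that cannot be converted is the CONFIG_NUMA sysfs loop in show_slab_objects(), which indexes the nodes[] array rather than walking the cache's node structures; it becomes a plain for loop over nr_node_ids instead.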
Diffstat (limited to 'mm/slub.c')
 mm/slub.c | 78 +++++++++++++++++++++++++++++-------------------------------------------------
 1 file changed, 29 insertions(+), 49 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 2569802aa7cc..3918cd62a4b2 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2157,6 +2157,7 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
         static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
                                       DEFAULT_RATELIMIT_BURST);
         int node;
+        struct kmem_cache_node *n;
 
         if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
                 return;
@@ -2171,15 +2172,11 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
                 pr_warn("  %s debugging increased min order, use slub_debug=O to disable.\n",
                         s->name);
 
-        for_each_online_node(node) {
-                struct kmem_cache_node *n = get_node(s, node);
+        for_each_kmem_cache_node(s, node, n) {
                 unsigned long nr_slabs;
                 unsigned long nr_objs;
                 unsigned long nr_free;
 
-                if (!n)
-                        continue;
-
                 nr_free  = count_partial(n, count_free);
                 nr_slabs = node_nr_slabs(n);
                 nr_objs  = node_nr_objs(n);
@@ -2923,13 +2920,10 @@ static void early_kmem_cache_node_alloc(int node)
 static void free_kmem_cache_nodes(struct kmem_cache *s)
 {
         int node;
+        struct kmem_cache_node *n;
 
-        for_each_node_state(node, N_NORMAL_MEMORY) {
-                struct kmem_cache_node *n = s->node[node];
-
-                if (n)
-                        kmem_cache_free(kmem_cache_node, n);
-
+        for_each_kmem_cache_node(s, node, n) {
+                kmem_cache_free(kmem_cache_node, n);
                 s->node[node] = NULL;
         }
 }
@@ -3217,12 +3211,11 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 static inline int kmem_cache_close(struct kmem_cache *s)
 {
         int node;
+        struct kmem_cache_node *n;
 
         flush_all(s);
         /* Attempt to free all objects */
-        for_each_node_state(node, N_NORMAL_MEMORY) {
-                struct kmem_cache_node *n = get_node(s, node);
-
+        for_each_kmem_cache_node(s, node, n) {
                 free_partial(s, n);
                 if (n->nr_partial || slabs_node(s, node))
                         return 1;
@@ -3407,9 +3400,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
                 return -ENOMEM;
 
         flush_all(s);
-        for_each_node_state(node, N_NORMAL_MEMORY) {
-                n = get_node(s, node);
-
+        for_each_kmem_cache_node(s, node, n) {
                 if (!n->nr_partial)
                         continue;
 
@@ -3581,6 +3572,7 @@ static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
 {
         int node;
         struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+        struct kmem_cache_node *n;
 
         memcpy(s, static_cache, kmem_cache->object_size);
 
@@ -3590,19 +3582,16 @@ static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
          * IPIs around.
          */
         __flush_cpu_slab(s, smp_processor_id());
-        for_each_node_state(node, N_NORMAL_MEMORY) {
-                struct kmem_cache_node *n = get_node(s, node);
+        for_each_kmem_cache_node(s, node, n) {
                 struct page *p;
 
-                if (n) {
-                        list_for_each_entry(p, &n->partial, lru)
-                                p->slab_cache = s;
+                list_for_each_entry(p, &n->partial, lru)
+                        p->slab_cache = s;
 
 #ifdef CONFIG_SLUB_DEBUG
-                        list_for_each_entry(p, &n->full, lru)
-                                p->slab_cache = s;
+                list_for_each_entry(p, &n->full, lru)
+                        p->slab_cache = s;
 #endif
-                }
         }
         list_add(&s->list, &slab_caches);
         return s;
@@ -3955,16 +3944,14 @@ static long validate_slab_cache(struct kmem_cache *s)
         unsigned long count = 0;
         unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
                                 sizeof(unsigned long), GFP_KERNEL);
+        struct kmem_cache_node *n;
 
         if (!map)
                 return -ENOMEM;
 
         flush_all(s);
-        for_each_node_state(node, N_NORMAL_MEMORY) {
-                struct kmem_cache_node *n = get_node(s, node);
-
+        for_each_kmem_cache_node(s, node, n)
                 count += validate_slab_node(s, n, map);
-        }
         kfree(map);
         return count;
 }
@@ -4118,6 +4105,7 @@ static int list_locations(struct kmem_cache *s, char *buf,
         int node;
         unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
                                      sizeof(unsigned long), GFP_KERNEL);
+        struct kmem_cache_node *n;
 
         if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
                                      GFP_TEMPORARY)) {
@@ -4127,8 +4115,7 @@ static int list_locations(struct kmem_cache *s, char *buf,
         /* Push back cpu slabs */
         flush_all(s);
 
-        for_each_node_state(node, N_NORMAL_MEMORY) {
-                struct kmem_cache_node *n = get_node(s, node);
+        for_each_kmem_cache_node(s, node, n) {
                 unsigned long flags;
                 struct page *page;
 
@@ -4327,8 +4314,9 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
         get_online_mems();
 #ifdef CONFIG_SLUB_DEBUG
         if (flags & SO_ALL) {
-                for_each_node_state(node, N_NORMAL_MEMORY) {
-                        struct kmem_cache_node *n = get_node(s, node);
+                struct kmem_cache_node *n;
 
+                for_each_kmem_cache_node(s, node, n) {
                         if (flags & SO_TOTAL)
                                 x = atomic_long_read(&n->total_objects);
@@ -4344,9 +4332,9 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
         } else
 #endif
         if (flags & SO_PARTIAL) {
-                for_each_node_state(node, N_NORMAL_MEMORY) {
-                        struct kmem_cache_node *n = get_node(s, node);
+                struct kmem_cache_node *n;
 
+                for_each_kmem_cache_node(s, node, n) {
                         if (flags & SO_TOTAL)
                                 x = count_partial(n, count_total);
                         else if (flags & SO_OBJECTS)
@@ -4359,7 +4347,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
         }
         x = sprintf(buf, "%lu", total);
 #ifdef CONFIG_NUMA
-        for_each_node_state(node, N_NORMAL_MEMORY)
+        for (node = 0; node < nr_node_ids; node++)
                 if (nodes[node])
                         x += sprintf(buf + x, " N%d=%lu",
                                         node, nodes[node]);
@@ -4373,16 +4361,12 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 static int any_slab_objects(struct kmem_cache *s)
 {
         int node;
+        struct kmem_cache_node *n;
 
-        for_each_online_node(node) {
-                struct kmem_cache_node *n = get_node(s, node);
-
-                if (!n)
-                        continue;
-
+        for_each_kmem_cache_node(s, node, n)
                 if (atomic_long_read(&n->total_objects))
                         return 1;
-        }
+
         return 0;
 }
 #endif
@@ -5337,13 +5321,9 @@ void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
         unsigned long nr_objs = 0;
         unsigned long nr_free = 0;
         int node;
+        struct kmem_cache_node *n;
 
-        for_each_online_node(node) {
-                struct kmem_cache_node *n = get_node(s, node);
-
-                if (!n)
-                        continue;
-
+        for_each_kmem_cache_node(s, node, n) {
                 nr_slabs += node_nr_slabs(n);
                 nr_objs += node_nr_objs(n);
                 nr_free += count_partial(n, count_free);