aboutsummaryrefslogtreecommitdiffstats
path: root/mm/slab.h
diff options
context:
space:
mode:
author: Christoph Lameter <cl@linux.com> 2014-08-06 19:04:07 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org> 2014-08-06 21:01:13 -0400
commit44c5356fb460053112ab87c9601df1605054edca (patch)
treed99acf7525a8092a2a7527c6d5599b330ba9262a /mm/slab.h
parent1536cb39338aff16b0e30cc6708da03b268337f7 (diff)
slab common: add functions for kmem_cache_node access
The patchset provides two new functions in mm/slab.h and modifies SLAB and SLUB to use these. The kmem_cache_node structure is shared between both allocators and the use of common accessors will allow us to move more code into slab_common.c in the future. This patch (of 3): These functions allow us to eliminate repeatedly used code in both SLAB and SLUB and also allow for the insertion of debugging code that may be needed in the development process. Signed-off-by: Christoph Lameter <cl@linux.com> Cc: Pekka Enberg <penberg@kernel.org> Acked-by: David Rientjes <rientjes@google.com> Acked-by: Joonsoo Kim <iamjoonsoo.kim@lge.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slab.h')
-rw-r--r-- mm/slab.h | 17
1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/mm/slab.h b/mm/slab.h
index 961a3fb1f5a2..3f9766e393a3 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -262,7 +262,7 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
262} 262}
263#endif 263#endif
264 264
265 265#ifndef CONFIG_SLOB
266/* 266/*
267 * The slab lists for all objects. 267 * The slab lists for all objects.
268 */ 268 */
@@ -294,5 +294,20 @@ struct kmem_cache_node {
294 294
295}; 295};
296 296
297static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
298{
299 return s->node[node];
300}
301
302/*
303 * Iterator over all nodes. The body will be executed for each node that has
304 * a kmem_cache_node structure allocated (which is true for all online nodes)
305 */
306#define for_each_kmem_cache_node(__s, __node, __n) \
307 for (__node = 0; __n = get_node(__s, __node), __node < nr_node_ids; __node++) \
308 if (__n)
309
310#endif
311
297void *slab_next(struct seq_file *m, void *p, loff_t *pos); 312void *slab_next(struct seq_file *m, void *p, loff_t *pos);
298void slab_stop(struct seq_file *m, void *p); 313void slab_stop(struct seq_file *m, void *p);