author    Christoph Lameter <cl@linux.com>  2013-01-10 14:14:19 -0500
committer Pekka Enberg <penberg@kernel.org>  2013-02-01 05:32:09 -0500
commit    ca34956b804b7554fc4e88826773380d9d5122a8 (patch)
tree      5fbcbd881ebe6e0229f59ff97f7d7a36ccd6e004 /mm
parent    ce8eb6c424c794d7fb4d1a6667d267990ca28072 (diff)
slab: Common definition for kmem_cache_node
Put the definitions for the kmem_cache_node structures together so that
we have one structure. That will allow us to create more common fields in
the future which could yield more opportunities to share code.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
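The pattern at work here, shown as a minimal standalone sketch (not kernel
code): common fields sit at the top of a single struct definition, while
allocator-specific fields are guarded by config #ifdefs, so any helper that
touches only the common fields compiles unchanged for either allocator. All
names below (node_stats, bump, CONFIG_SLAB_LIKE, CONFIG_SLUB_LIKE) are
illustrative assumptions, not identifiers from the patch.

#include <stdio.h>

#define CONFIG_SLAB_LIKE 1	/* pretend we build the SLAB-like flavour */

struct node_stats {
	unsigned long common_counter;	/* common field, visible to shared code */
#ifdef CONFIG_SLAB_LIKE
	unsigned int colour_next;	/* field only one flavour needs */
#endif
#ifdef CONFIG_SLUB_LIKE
	unsigned long nr_partial;	/* field only the other flavour needs */
#endif
};

/*
 * Shared helper: compiles for either flavour because it only
 * touches the common field at the top of the struct.
 */
static void bump(struct node_stats *n)
{
	n->common_counter++;
}

int main(void)
{
	struct node_stats n = { 0 };
	bump(&n);
	printf("common_counter = %lu\n", n.common_counter);
	return 0;
}

As in the sketch, the patch below hoists the one field both allocators
already share (list_lock) to the top of the unified kmem_cache_node and
leaves the rest behind CONFIG_SLAB and CONFIG_SLUB.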
Diffstat (limited to 'mm')
-rw-r--r--  mm/slab.c  17
-rw-r--r--  mm/slab.h  32
2 files changed, 32 insertions(+), 17 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index c162b2eb493a..17f859614546 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -286,23 +286,6 @@ struct arraycache_init {
 };
 
 /*
- * The slab lists for all objects.
- */
-struct kmem_cache_node {
-	struct list_head slabs_partial;	/* partial list first, better asm code */
-	struct list_head slabs_full;
-	struct list_head slabs_free;
-	unsigned long free_objects;
-	unsigned int free_limit;
-	unsigned int colour_next;	/* Per-node cache coloring */
-	spinlock_t list_lock;
-	struct array_cache *shared;	/* shared per node */
-	struct array_cache **alien;	/* on other nodes */
-	unsigned long next_reap;	/* updated without locking */
-	int free_touched;		/* updated without locking */
-};
-
-/*
  * Need this for bootstrapping a per node allocator.
  */
 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
diff --git a/mm/slab.h b/mm/slab.h
index f0a552ff7b9b..f96b49e4704e 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -239,3 +239,35 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
 	return s;
 }
 #endif
+
+
+/*
+ * The slab lists for all objects.
+ */
+struct kmem_cache_node {
+	spinlock_t list_lock;
+
+#ifdef CONFIG_SLAB
+	struct list_head slabs_partial;	/* partial list first, better asm code */
+	struct list_head slabs_full;
+	struct list_head slabs_free;
+	unsigned long free_objects;
+	unsigned int free_limit;
+	unsigned int colour_next;	/* Per-node cache coloring */
+	struct array_cache *shared;	/* shared per node */
+	struct array_cache **alien;	/* on other nodes */
+	unsigned long next_reap;	/* updated without locking */
+	int free_touched;		/* updated without locking */
+#endif
+
+#ifdef CONFIG_SLUB
+	unsigned long nr_partial;
+	struct list_head partial;
+#ifdef CONFIG_SLUB_DEBUG
+	atomic_long_t nr_slabs;
+	atomic_long_t total_objects;
+	struct list_head full;
+#endif
+#endif
+
+};