author     Pekka Enberg <penberg@cs.helsinki.fi>      2008-05-09 14:32:44 -0400
committer  Vegard Nossum <vegard.nossum@gmail.com>    2009-06-13 02:58:43 -0400
commit     8eae985f08138758e06503588f5f1196269bc415 (patch)
tree       6bcd43b5ee4cfd225ee2a630441b61c7c2ce69eb /mm/slab.c
parent     b618ad31bb2020db6a36929122e5554e33210d47 (diff)
slab: move struct kmem_cache to headers
Move the SLAB struct kmem_cache definition to <linux/slab_def.h>, as is
already done for SLUB, so that kmemcheck can access ->ctor and ->flags.

Cc: Ingo Molnar <mingo@elte.hu>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
[rebased for mainline inclusion]
Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
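For context, a minimal sketch (not part of this patch) of why the move matters: once struct kmem_cache is defined in <linux/slab_def.h> (pulled in via <linux/slab.h> on SLAB builds), code compiled outside mm/slab.c can dereference a cache's fields instead of treating the type as opaque. The helper below and its name are made up purely to illustrate the kind of ->ctor/->flags access kmemcheck needs.

#include <linux/slab.h>		/* pulls in <linux/slab_def.h> on SLAB configs */

/*
 * Hypothetical helper, illustration only: with struct kmem_cache defined
 * in the header, generic code such as kmemcheck can inspect ->flags and
 * invoke ->ctor directly instead of going through mm/slab.c internals.
 */
static inline void example_init_object(struct kmem_cache *cachep, void *object)
{
	if (!(cachep->flags & SLAB_POISON) && cachep->ctor)
		cachep->ctor(object);
}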
Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c  81
1 file changed, 0 insertions, 81 deletions
diff --git a/mm/slab.c b/mm/slab.c
index f46b65d124e5..bf0c3af143fb 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -374,87 +374,6 @@ static void kmem_list3_init(struct kmem_list3 *parent)
 	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
 	} while (0)
 
-/*
- * struct kmem_cache
- *
- * manages a cache.
- */
-
-struct kmem_cache {
-/* 1) per-cpu data, touched during every alloc/free */
-	struct array_cache *array[NR_CPUS];
-/* 2) Cache tunables. Protected by cache_chain_mutex */
-	unsigned int batchcount;
-	unsigned int limit;
-	unsigned int shared;
-
-	unsigned int buffer_size;
-	u32 reciprocal_buffer_size;
-/* 3) touched by every alloc & free from the backend */
-
-	unsigned int flags;		/* constant flags */
-	unsigned int num;		/* # of objs per slab */
-
-/* 4) cache_grow/shrink */
-	/* order of pgs per slab (2^n) */
-	unsigned int gfporder;
-
-	/* force GFP flags, e.g. GFP_DMA */
-	gfp_t gfpflags;
-
-	size_t colour;			/* cache colouring range */
-	unsigned int colour_off;	/* colour offset */
-	struct kmem_cache *slabp_cache;
-	unsigned int slab_size;
-	unsigned int dflags;		/* dynamic flags */
-
-	/* constructor func */
-	void (*ctor)(void *obj);
-
-/* 5) cache creation/removal */
-	const char *name;
-	struct list_head next;
-
-/* 6) statistics */
-#if STATS
-	unsigned long num_active;
-	unsigned long num_allocations;
-	unsigned long high_mark;
-	unsigned long grown;
-	unsigned long reaped;
-	unsigned long errors;
-	unsigned long max_freeable;
-	unsigned long node_allocs;
-	unsigned long node_frees;
-	unsigned long node_overflow;
-	atomic_t allochit;
-	atomic_t allocmiss;
-	atomic_t freehit;
-	atomic_t freemiss;
-#endif
-#if DEBUG
-	/*
-	 * If debugging is enabled, then the allocator can add additional
-	 * fields and/or padding to every object. buffer_size contains the total
-	 * object size including these internal fields, the following two
-	 * variables contain the offset to the user object and its size.
-	 */
-	int obj_offset;
-	int obj_size;
-#endif
-	/*
-	 * We put nodelists[] at the end of kmem_cache, because we want to size
-	 * this array to nr_node_ids slots instead of MAX_NUMNODES
-	 * (see kmem_cache_init())
-	 * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
-	 * is statically defined, so we reserve the max number of nodes.
-	 */
-	struct kmem_list3 *nodelists[MAX_NUMNODES];
-	/*
-	 * Do not add fields after nodelists[]
-	 */
-};
-
 #define CFLGS_OFF_SLAB		(0x80000000UL)
 #define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)
 
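A side note on the nodelists[] comment in the removed block: the array is declared with MAX_NUMNODES entries so that the statically defined cache_cache is always large enough, but kmem_cache_init() trims dynamically created descriptors down to nr_node_ids pointer slots. The sketch below shows that size computation; it assumes the field layout above and uses a made-up helper name for illustration only.

#include <linux/slab.h>		/* struct kmem_cache via <linux/slab_def.h> */
#include <linux/nodemask.h>	/* nr_node_ids */
#include <linux/stddef.h>	/* offsetof() */

/*
 * Sketch only: why nodelists[] must remain the last member.  The usable
 * descriptor size ends right after the nr_node_ids pointers actually in
 * use, so any field placed behind the array would be cut off.
 */
static inline size_t example_cache_descriptor_size(void)
{
	return offsetof(struct kmem_cache, nodelists) +
	       nr_node_ids * sizeof(struct kmem_list3 *);
}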