 include/linux/slab_def.h | 81 ++++++++++++++++++++++++++++++++++++++++++++
 mm/slab.c                | 81 --------------------------------------------
 2 files changed, 81 insertions(+), 81 deletions(-)
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 713f841ecaa9..850d057500de 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -16,6 +16,87 @@
 #include <linux/compiler.h>
 #include <linux/kmemtrace.h>
 
+/*
+ * struct kmem_cache
+ *
+ * manages a cache.
+ */
+
+struct kmem_cache {
+/* 1) per-cpu data, touched during every alloc/free */
+	struct array_cache *array[NR_CPUS];
+/* 2) Cache tunables. Protected by cache_chain_mutex */
+	unsigned int batchcount;
+	unsigned int limit;
+	unsigned int shared;
+
+	unsigned int buffer_size;
+	u32 reciprocal_buffer_size;
+/* 3) touched by every alloc & free from the backend */
+
+	unsigned int flags;		/* constant flags */
+	unsigned int num;		/* # of objs per slab */
+
+/* 4) cache_grow/shrink */
+	/* order of pgs per slab (2^n) */
+	unsigned int gfporder;
+
+	/* force GFP flags, e.g. GFP_DMA */
+	gfp_t gfpflags;
+
+	size_t colour;			/* cache colouring range */
+	unsigned int colour_off;	/* colour offset */
+	struct kmem_cache *slabp_cache;
+	unsigned int slab_size;
+	unsigned int dflags;		/* dynamic flags */
+
+	/* constructor func */
+	void (*ctor)(void *obj);
+
+/* 5) cache creation/removal */
+	const char *name;
+	struct list_head next;
+
+/* 6) statistics */
+#ifdef CONFIG_DEBUG_SLAB
+	unsigned long num_active;
+	unsigned long num_allocations;
+	unsigned long high_mark;
+	unsigned long grown;
+	unsigned long reaped;
+	unsigned long errors;
+	unsigned long max_freeable;
+	unsigned long node_allocs;
+	unsigned long node_frees;
+	unsigned long node_overflow;
+	atomic_t allochit;
+	atomic_t allocmiss;
+	atomic_t freehit;
+	atomic_t freemiss;
+
+	/*
+	 * If debugging is enabled, then the allocator can add additional
+	 * fields and/or padding to every object. buffer_size contains the total
+	 * object size including these internal fields, the following two
+	 * variables contain the offset to the user object and its size.
+	 */
+	int obj_offset;
+	int obj_size;
+#endif /* CONFIG_DEBUG_SLAB */
+
+	/*
+	 * We put nodelists[] at the end of kmem_cache, because we want to size
+	 * this array to nr_node_ids slots instead of MAX_NUMNODES
+	 * (see kmem_cache_init())
+	 * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
+	 * is statically defined, so we reserve the max number of nodes.
+	 */
+	struct kmem_list3 *nodelists[MAX_NUMNODES];
+	/*
+	 * Do not add fields after nodelists[]
+	 */
+};
+
 /* Size description struct for general caches. */
 struct cache_sizes {
 	size_t cs_size;
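
Aside: the buffer_size / reciprocal_buffer_size pair above exists so the hot path can turn an object pointer back into an object index without an integer divide; reciprocal_buffer_size holds a precomputed reciprocal_value(buffer_size). A minimal sketch of that lookup, modeled on the obj_to_index() helper that mm/slab.c builds from these fields (the helper is not part of this patch):

#include <linux/reciprocal_div.h>

/* Sketch: recover an object's index within its slab. A plain
 * offset / buffer_size would cost a hardware divide on every free;
 * reciprocal_divide() gets the same result with a multiply and a
 * shift against the cached reciprocal_buffer_size. */
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct slab *slab, void *obj)
{
	u32 offset = (obj - slab->s_mem);

	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}

The reciprocal itself is refreshed whenever buffer_size is set, along the lines of cachep->reciprocal_buffer_size = reciprocal_value(cachep->buffer_size).
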
diff --git a/mm/slab.c b/mm/slab.c
index f46b65d124e5..bf0c3af143fb 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -374,87 +374,6 @@ static void kmem_list3_init(struct kmem_list3 *parent)
 	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
 	} while (0)
 
-/*
- * struct kmem_cache
- *
- * manages a cache.
- */
-
-struct kmem_cache {
-/* 1) per-cpu data, touched during every alloc/free */
-	struct array_cache *array[NR_CPUS];
-/* 2) Cache tunables. Protected by cache_chain_mutex */
-	unsigned int batchcount;
-	unsigned int limit;
-	unsigned int shared;
-
-	unsigned int buffer_size;
-	u32 reciprocal_buffer_size;
-/* 3) touched by every alloc & free from the backend */
-
-	unsigned int flags;		/* constant flags */
-	unsigned int num;		/* # of objs per slab */
-
-/* 4) cache_grow/shrink */
-	/* order of pgs per slab (2^n) */
-	unsigned int gfporder;
-
-	/* force GFP flags, e.g. GFP_DMA */
-	gfp_t gfpflags;
-
-	size_t colour;			/* cache colouring range */
-	unsigned int colour_off;	/* colour offset */
-	struct kmem_cache *slabp_cache;
-	unsigned int slab_size;
-	unsigned int dflags;		/* dynamic flags */
-
-	/* constructor func */
-	void (*ctor)(void *obj);
-
-/* 5) cache creation/removal */
-	const char *name;
-	struct list_head next;
-
-/* 6) statistics */
-#if STATS
-	unsigned long num_active;
-	unsigned long num_allocations;
-	unsigned long high_mark;
-	unsigned long grown;
-	unsigned long reaped;
-	unsigned long errors;
-	unsigned long max_freeable;
-	unsigned long node_allocs;
-	unsigned long node_frees;
-	unsigned long node_overflow;
-	atomic_t allochit;
-	atomic_t allocmiss;
-	atomic_t freehit;
-	atomic_t freemiss;
-#endif
-#if DEBUG
-	/*
-	 * If debugging is enabled, then the allocator can add additional
-	 * fields and/or padding to every object. buffer_size contains the total
-	 * object size including these internal fields, the following two
-	 * variables contain the offset to the user object and its size.
-	 */
-	int obj_offset;
-	int obj_size;
-#endif
-	/*
-	 * We put nodelists[] at the end of kmem_cache, because we want to size
-	 * this array to nr_node_ids slots instead of MAX_NUMNODES
-	 * (see kmem_cache_init())
-	 * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
-	 * is statically defined, so we reserve the max number of nodes.
-	 */
-	struct kmem_list3 *nodelists[MAX_NUMNODES];
-	/*
-	 * Do not add fields after nodelists[]
-	 */
-};
-
 #define	CFLGS_OFF_SLAB		(0x80000000UL)
 #define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)
 
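
Aside: the "Do not add fields after nodelists[]" rule is load-bearing. Because the array is the last member, kmem_cache_init() can size runtime-created kmem_cache objects for nr_node_ids nodes instead of the compile-time MAX_NUMNODES worst case; only the statically defined cache_cache pays for the full array. A sketch of that size calculation, assuming the field names above (the helper name itself is hypothetical):

#include <linux/nodemask.h>
#include <linux/stddef.h>

/* Sketch: bytes needed for a kmem_cache trimmed to the nodes that
 * actually exist. Nothing may follow nodelists[], so the struct can
 * simply be cut short after nr_node_ids pointers. */
static size_t kmem_cache_alloc_size(void)
{
	return offsetof(struct kmem_cache, nodelists) +
	       nr_node_ids * sizeof(struct kmem_list3 *);
}
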