Diffstat (limited to 'include/linux/slab_def.h')
-rw-r--r--	include/linux/slab_def.h	52
1 file changed, 13 insertions(+), 39 deletions(-)
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 83203ae9390..d00e0bacda9 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -18,53 +18,25 @@
 #include <trace/events/kmem.h>
 
 /*
- * Enforce a minimum alignment for the kmalloc caches.
- * Usually, the kmalloc caches are cache_line_size() aligned, except when
- * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
- * Some archs want to perform DMA into kmalloc caches and need a guaranteed
- * alignment larger than the alignment of a 64-bit integer.
- * ARCH_KMALLOC_MINALIGN allows that.
- * Note that increasing this value may disable some debug features.
- */
-#ifdef ARCH_DMA_MINALIGN
-#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
-#else
-#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
-#endif
-
-#ifndef ARCH_SLAB_MINALIGN
-/*
- * Enforce a minimum alignment for all caches.
- * Intended for archs that get misalignment faults even for BYTES_PER_WORD
- * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
- * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
- * some debug features.
- */
-#define ARCH_SLAB_MINALIGN 0
-#endif
-
-/*
  * struct kmem_cache
  *
  * manages a cache.
  */
 
 struct kmem_cache {
-/* 1) per-cpu data, touched during every alloc/free */
-	struct array_cache *array[NR_CPUS];
-/* 2) Cache tunables. Protected by cache_chain_mutex */
+/* 1) Cache tunables. Protected by cache_chain_mutex */
 	unsigned int batchcount;
 	unsigned int limit;
 	unsigned int shared;
 
 	unsigned int buffer_size;
 	u32 reciprocal_buffer_size;
-/* 3) touched by every alloc & free from the backend */
+/* 2) touched by every alloc & free from the backend */
 
 	unsigned int flags;		/* constant flags */
 	unsigned int num;		/* # of objs per slab */
 
-/* 4) cache_grow/shrink */
+/* 3) cache_grow/shrink */
 	/* order of pgs per slab (2^n) */
 	unsigned int gfporder;
 
@@ -80,11 +52,11 @@ struct kmem_cache {
 	/* constructor func */
 	void (*ctor)(void *obj);
 
-/* 5) cache creation/removal */
+/* 4) cache creation/removal */
 	const char *name;
 	struct list_head next;
 
-/* 6) statistics */
+/* 5) statistics */
 #ifdef CONFIG_DEBUG_SLAB
 	unsigned long num_active;
 	unsigned long num_allocations;
@@ -111,16 +83,18 @@ struct kmem_cache {
 	int obj_size;
 #endif /* CONFIG_DEBUG_SLAB */
 
+/* 6) per-cpu/per-node data, touched during every alloc/free */
 	/*
-	 * We put nodelists[] at the end of kmem_cache, because we want to size
-	 * this array to nr_node_ids slots instead of MAX_NUMNODES
+	 * We put array[] at the end of kmem_cache, because we want to size
+	 * this array to nr_cpu_ids slots instead of NR_CPUS
 	 * (see kmem_cache_init())
-	 * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
-	 * is statically defined, so we reserve the max number of nodes.
+	 * We still use [NR_CPUS] and not [1] or [0] because cache_cache
+	 * is statically defined, so we reserve the max number of cpus.
	 */
-	struct kmem_list3 *nodelists[MAX_NUMNODES];
+	struct kmem_list3 **nodelists;
+	struct array_cache *array[NR_CPUS];
 	/*
-	 * Do not add fields after nodelists[]
+	 * Do not add fields after array[]
 	 */
 };
 
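The last hunk is the point of the patch: nodelists becomes a plain pointer and the per-cpu array[] moves to the end of struct kmem_cache, so that kmem_cache_init() can allocate each dynamic cache with only nr_cpu_ids trailing slots instead of the compile-time NR_CPUS, while the statically defined cache_cache still reserves the full array. A minimal userspace sketch of that trailing-array trick follows; the names (my_cache, NSLOTS, nr_ids) are illustrative, not taken from slab_def.h:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define NSLOTS 4096			/* compile-time maximum, like NR_CPUS */

struct my_cache {
	unsigned int batchcount;	/* ...other members come first... */
	void *array[NSLOTS];		/* runtime-sized part, must stay last */
};

/* A static instance reserves all NSLOTS slots, like cache_cache does. */
static struct my_cache boot_cache;

int main(void)
{
	size_t nr_ids = 8;		/* discovered at runtime, like nr_cpu_ids */

	boot_cache.batchcount = 0;	/* static copy: full-size, always valid */

	/*
	 * Dynamic instances are trimmed: allocate only the bytes up to
	 * the slots actually needed, not sizeof(struct my_cache).
	 */
	struct my_cache *c = malloc(offsetof(struct my_cache, array) +
				    nr_ids * sizeof(void *));
	if (!c)
		return 1;

	printf("full: %zu bytes, trimmed: %zu bytes\n",
	       sizeof(struct my_cache),
	       offsetof(struct my_cache, array) + nr_ids * sizeof(void *));
	free(c);
	return 0;
}

This is also why the patch keeps the "Do not add fields after array[]" warning: a member placed behind the trailing array would lie beyond the trimmed allocation, and any access to it through a dynamically allocated cache would be out of bounds.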