author     Dmitry Torokhov <dmitry.torokhov@gmail.com>  2013-06-28 02:00:25 -0400
committer  Dmitry Torokhov <dmitry.torokhov@gmail.com>  2013-06-28 02:00:25 -0400
commit     31881d74b6dd1a6c530cff61248def4f2da38bee
tree       be62420cf39192074e13b25553d172b9d5e58a33 /include/linux/slub_def.h
parent     8855f30cd2b68012571932c7b01290c20be4508c
parent     257867dc8d893690c175c1f717f91c3b6d44a63d
Merge branch 'for-next' of git://github.com/rydberg/linux into next
Pull in changes from Henrik: "a trivial MT documentation fix".
Diffstat (limited to 'include/linux/slub_def.h')
 -rw-r--r--  include/linux/slub_def.h | 136
 1 file changed, 11 insertions(+), 125 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 9db4825cd393..027276fa8713 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -53,17 +53,6 @@ struct kmem_cache_cpu {
 #endif
 };
 
-struct kmem_cache_node {
-	spinlock_t list_lock;	/* Protect partial list and nr_partial */
-	unsigned long nr_partial;
-	struct list_head partial;
-#ifdef CONFIG_SLUB_DEBUG
-	atomic_long_t nr_slabs;
-	atomic_long_t total_objects;
-	struct list_head full;
-#endif
-};
-
 /*
  * Word size structure that can be atomically updated or read and that
  * contains both the order and the number of objects that a slab of the
@@ -115,111 +104,6 @@ struct kmem_cache {
 	struct kmem_cache_node *node[MAX_NUMNODES];
 };
 
-/*
- * Kmalloc subsystem.
- */
-#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
-#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
-#else
-#define KMALLOC_MIN_SIZE 8
-#endif
-
-#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
-
-/*
- * Maximum kmalloc object size handled by SLUB. Larger object allocations
- * are passed through to the page allocator. The page allocator "fastpath"
- * is relatively slow so we need this value sufficiently high so that
- * performance critical objects are allocated through the SLUB fastpath.
- *
- * This should be dropped to PAGE_SIZE / 2 once the page allocator
- * "fastpath" becomes competitive with the slab allocator fastpaths.
- */
-#define SLUB_MAX_SIZE (2 * PAGE_SIZE)
-
-#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)
-
-#ifdef CONFIG_ZONE_DMA
-#define SLUB_DMA __GFP_DMA
-#else
-/* Disable DMA functionality */
-#define SLUB_DMA (__force gfp_t)0
-#endif
-
-/*
- * We keep the general caches in an array of slab caches that are used for
- * 2^x bytes of allocations.
- */
-extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
-
-/*
- * Sorry that the following has to be that ugly but some versions of GCC
- * have trouble with constant propagation and loops.
- */
-static __always_inline int kmalloc_index(size_t size)
-{
-	if (!size)
-		return 0;
-
-	if (size <= KMALLOC_MIN_SIZE)
-		return KMALLOC_SHIFT_LOW;
-
-	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
-		return 1;
-	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
-		return 2;
-	if (size <=          8) return 3;
-	if (size <=         16) return 4;
-	if (size <=         32) return 5;
-	if (size <=         64) return 6;
-	if (size <=        128) return 7;
-	if (size <=        256) return 8;
-	if (size <=        512) return 9;
-	if (size <=       1024) return 10;
-	if (size <=   2 * 1024) return 11;
-	if (size <=   4 * 1024) return 12;
-/*
- * The following is only needed to support architectures with a larger page
- * size than 4k. We need to support 2 * PAGE_SIZE here. So for a 64k page
- * size we would have to go up to 128k.
- */
-	if (size <=   8 * 1024) return 13;
-	if (size <=  16 * 1024) return 14;
-	if (size <=  32 * 1024) return 15;
-	if (size <=  64 * 1024) return 16;
-	if (size <= 128 * 1024) return 17;
-	if (size <= 256 * 1024) return 18;
-	if (size <= 512 * 1024) return 19;
-	if (size <= 1024 * 1024) return 20;
-	if (size <=  2 * 1024 * 1024) return 21;
-	BUG();
-	return -1; /* Will never be reached */
-
-/*
- * What we really wanted to do and cannot do because of compiler issues is:
- *	int i;
- *	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
- *		if (size <= (1 << i))
- *			return i;
- */
-}
-
-/*
- * Find the slab cache for a given combination of allocation flags and size.
- *
- * This ought to end up with a global pointer to the right cache
- * in kmalloc_caches.
- */
-static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
-{
-	int index = kmalloc_index(size);
-
-	if (index == 0)
-		return NULL;
-
-	return kmalloc_caches[index];
-}
-
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
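The block removed above doubles as a description of how SLUB mapped a compile-time constant size to a kmalloc cache index. As a hedged illustration only, the following standalone userspace sketch (a reconstruction, not kernel code) re-implements that mapping twice: unrolled as in the removed kmalloc_index(), and as the loop the in-code comment says was intended. It assumes the default KMALLOC_MIN_SIZE of 8 and 4k pages; the two forms differ only for sizes served by the special 96- and 192-byte caches.

#include <stdio.h>
#include <stddef.h>

#define KMALLOC_MIN_SIZE	8	/* default, no ARCH_DMA_MINALIGN override */
#define KMALLOC_SHIFT_LOW	3	/* ilog2(KMALLOC_MIN_SIZE) */
#define KMALLOC_SHIFT_HIGH	13	/* 2 * PAGE_SIZE with 4k pages */

/* Unrolled mapping mirroring the removed kmalloc_index(); the original's
 * redundant "size <= 8" check is folded into the KMALLOC_MIN_SIZE test. */
static int index_unrolled(size_t size)
{
	if (!size)
		return 0;
	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;
	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;		/* the special 96-byte cache */
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;		/* the special 192-byte cache */
	if (size <=   16) return 4;
	if (size <=   32) return 5;
	if (size <=   64) return 6;
	if (size <=  128) return 7;
	if (size <=  256) return 8;
	if (size <=  512) return 9;
	if (size <= 1024) return 10;
	if (size <= 2 * 1024) return 11;
	if (size <= 4 * 1024) return 12;
	if (size <= 8 * 1024) return 13;
	return -1;			/* would be BUG() in the kernel */
}

/* The loop the comment wished for; it cannot know about the 96/192 caches. */
static int index_loop(size_t size)
{
	int i;

	if (!size)
		return 0;
	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;
	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
		if (size <= (1UL << i))
			return i;
	return -1;
}

int main(void)
{
	static const size_t sizes[] = { 1, 8, 24, 64, 65, 96, 100, 150, 4096, 8192 };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("size %5zu -> unrolled %2d, loop %2d\n",
		       sizes[i], index_unrolled(sizes[i]), index_loop(sizes[i]));
	return 0;
}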
@@ -274,16 +158,17 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size)) {
-		if (size > SLUB_MAX_SIZE)
+		if (size > KMALLOC_MAX_CACHE_SIZE)
 			return kmalloc_large(size, flags);
 
-		if (!(flags & SLUB_DMA)) {
-			struct kmem_cache *s = kmalloc_slab(size);
+		if (!(flags & GFP_DMA)) {
+			int index = kmalloc_index(size);
 
-			if (!s)
+			if (!index)
 				return ZERO_SIZE_PTR;
 
-			return kmem_cache_alloc_trace(s, flags, size);
+			return kmem_cache_alloc_trace(kmalloc_caches[index],
+					flags, size);
 		}
 	}
 	return __kmalloc(size, flags);
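From a caller's point of view the change is invisible except for what the constant-size path compiles down to. A minimal kernel-context sketch (hypothetical struct and function names, assuming the default 8-byte KMALLOC_MIN_SIZE so that kmalloc_index(64) evaluates to 6):

#include <linux/slab.h>

/* Hypothetical 64-byte object used purely for illustration. */
struct my_ctx {
	char buf[64];
};

static struct my_ctx *my_ctx_alloc(void)
{
	/*
	 * sizeof(struct my_ctx) is a compile-time constant well below
	 * KMALLOC_MAX_CACHE_SIZE, and GFP_KERNEL does not include GFP_DMA,
	 * so with the hunk above this call folds to
	 * kmem_cache_alloc_trace(kmalloc_caches[6], GFP_KERNEL, 64)
	 * rather than going through the removed kmalloc_slab() helper.
	 */
	return kmalloc(sizeof(struct my_ctx), GFP_KERNEL);
}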
@@ -310,13 +195,14 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	if (__builtin_constant_p(size) &&
-		size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
-		struct kmem_cache *s = kmalloc_slab(size);
+		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
+		int index = kmalloc_index(size);
 
-		if (!s)
+		if (!index)
 			return ZERO_SIZE_PTR;
 
-		return kmem_cache_alloc_node_trace(s, flags, node, size);
+		return kmem_cache_alloc_node_trace(kmalloc_caches[index],
+					flags, node, size);
 	}
 	return __kmalloc_node(size, flags, node);
 }
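The node-aware variant follows the same pattern; a matching sketch under the same assumptions (hypothetical function name, constant size, no GFP_DMA):

#include <linux/slab.h>

static void *alloc_on_node(int nid)
{
	/*
	 * 512 is a compile-time constant, so kmalloc_index(512) == 9 and the
	 * branch above resolves to
	 * kmem_cache_alloc_node_trace(kmalloc_caches[9], GFP_KERNEL, nid, 512).
	 */
	return kmalloc_node(512, GFP_KERNEL, nid);
}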