author	Linus Torvalds <torvalds@linux-foundation.org>	2013-07-14 18:14:29 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-07-14 18:14:29 -0400
commit	54be8200198ddfc6cb396720460c19881fac2d5a (patch)
tree	58ccab6e0cfb35b30e7e16804f15fe9c94628f12 /include
parent	41d9884c44237cd66e2bdbc412028b29196b344c (diff)
parent	c25f195e828f847735c7626b5693ddc3b853d245 (diff)
Merge branch 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux
Pull slab update from Pekka Enberg:
 "Highlights:

  - Fix for boot-time problems on some architectures due to
    init_lock_keys() not respecting kmalloc_caches boundaries
    (Christoph Lameter)

  - CONFIG_SLUB_CPU_PARTIAL requested by RT folks (Joonsoo Kim)

  - Fix for excessive slab freelist draining (Wanpeng Li)

  - SLUB and SLOB cleanups and fixes (various people)"

I ended up editing the branch, and this avoids two commits at the end
that were immediately reverted, and I instead just applied the oneliner
fix in between myself.

* 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux:
  slub: Check for page NULL before doing the node_match check
  mm/slab: Give s_next and s_stop slab-specific names
  slob: Check for NULL pointer before calling ctor()
  slub: Make cpu partial slab support configurable
  slab: add kmalloc() to kernel API documentation
  slab: fix init_lock_keys
  slob: use DIV_ROUND_UP where possible
  slub: do not put a slab to cpu partial list when cpu_partial is 0
  mm/slub: Use node_nr_slabs and node_nr_objs in get_slabinfo
  mm/slub: Drop unnecessary nr_partials
  mm/slab: Fix /proc/slabinfo unwriteable for slab
  mm/slab: Sharing s_next and s_stop between slab and slub
  mm/slab: Fix drain freelist excessively
  slob: Rework #ifdeffery in slab.h
  mm, slab: moved kmem_cache_alloc_node comment to correct place
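The first highlight concerns init_lock_keys() indexing past the end of the kmalloc_caches array on some configurations. The sketch below is not the actual patch; it only illustrates the bounded walk that the fix enforces, and the helper name walk_kmalloc_caches and its loop body are invented for the example.

#include <linux/slab.h>

/*
 * Illustrative sketch, not the real init_lock_keys() change: kmalloc_caches[]
 * has KMALLOC_SHIFT_HIGH + 1 slots, so a walk over it must stop at
 * KMALLOC_SHIFT_HIGH, and individual entries may still be NULL because not
 * every size class is populated on every configuration.
 */
static void walk_kmalloc_caches(void)
{
	int i;

	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
		struct kmem_cache *cache = kmalloc_caches[i];

		if (!cache)
			continue;	/* unpopulated size class */

		/* per-cache work (e.g. lockdep key setup) would go here */
	}
}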
Diffstat (limited to 'include')
-rw-r--r--	include/linux/slab.h	57
-rw-r--r--	include/linux/slob_def.h	8
2 files changed, 42 insertions(+), 23 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 0c621752caa6..6c5cc0ea8713 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -169,11 +169,7 @@ struct kmem_cache {
 	struct list_head list;	/* List of all slab caches on the system */
 };
 
-#define KMALLOC_MAX_SIZE (1UL << 30)
-
-#include <linux/slob_def.h>
-
-#else /* CONFIG_SLOB */
+#endif /* CONFIG_SLOB */
 
 /*
  * Kmalloc array related definitions
@@ -195,7 +191,9 @@ struct kmem_cache {
 #ifndef KMALLOC_SHIFT_LOW
 #define KMALLOC_SHIFT_LOW 5
 #endif
-#else
+#endif
+
+#ifdef CONFIG_SLUB
 /*
  * SLUB allocates up to order 2 pages directly and otherwise
  * passes the request to the page allocator.
@@ -207,6 +205,19 @@ struct kmem_cache {
 #endif
 #endif
 
+#ifdef CONFIG_SLOB
+/*
+ * SLOB passes all page size and larger requests to the page allocator.
+ * No kmalloc array is necessary since objects of different sizes can
+ * be allocated from the same page.
+ */
+#define KMALLOC_SHIFT_MAX 30
+#define KMALLOC_SHIFT_HIGH PAGE_SHIFT
+#ifndef KMALLOC_SHIFT_LOW
+#define KMALLOC_SHIFT_LOW 3
+#endif
+#endif
+
 /* Maximum allocatable size */
 #define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX)
 /* Maximum size for which we actually use a slab cache */
@@ -221,6 +232,7 @@ struct kmem_cache {
 #define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
 #endif
 
+#ifndef CONFIG_SLOB
 extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
 #ifdef CONFIG_ZONE_DMA
 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
@@ -275,13 +287,18 @@ static __always_inline int kmalloc_index(size_t size)
 	/* Will never be reached. Needed because the compiler may complain */
 	return -1;
 }
+#endif /* !CONFIG_SLOB */
 
 #ifdef CONFIG_SLAB
 #include <linux/slab_def.h>
-#elif defined(CONFIG_SLUB)
+#endif
+
+#ifdef CONFIG_SLUB
 #include <linux/slub_def.h>
-#else
-#error "Unknown slab allocator"
+#endif
+
+#ifdef CONFIG_SLOB
+#include <linux/slob_def.h>
 #endif
 
 /*
@@ -291,6 +308,7 @@ static __always_inline int kmalloc_index(size_t size)
  */
 static __always_inline int kmalloc_size(int n)
 {
+#ifndef CONFIG_SLOB
 	if (n > 2)
 		return 1 << n;
 
@@ -299,10 +317,9 @@ static __always_inline int kmalloc_size(int n)
 
 	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
 		return 192;
-
+#endif
 	return 0;
 }
-#endif /* !CONFIG_SLOB */
 
 /*
  * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
@@ -356,9 +373,8 @@ int cache_show(struct kmem_cache *s, struct seq_file *m);
 void print_slabinfo_header(struct seq_file *m);
 
 /**
- * kmalloc_array - allocate memory for an array.
- * @n: number of elements.
- * @size: element size.
+ * kmalloc - allocate memory
+ * @size: how many bytes of memory are required.
  * @flags: the type of memory to allocate.
  *
  * The @flags argument may be one of:
@@ -405,6 +421,17 @@ void print_slabinfo_header(struct seq_file *m);
  * There are other flags available as well, but these are not intended
  * for general use, and so are not documented here. For a full list of
  * potential flags, always refer to linux/gfp.h.
+ *
+ * kmalloc is the normal method of allocating memory
+ * in the kernel.
+ */
+static __always_inline void *kmalloc(size_t size, gfp_t flags);
+
+/**
+ * kmalloc_array - allocate memory for an array.
+ * @n: number of elements.
+ * @size: element size.
+ * @flags: the type of memory to allocate (see kmalloc).
  */
 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
 {
@@ -428,7 +455,7 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
 /**
  * kmalloc_node - allocate memory from a specific node
  * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate (see kcalloc).
+ * @flags: the type of memory to allocate (see kmalloc).
  * @node: node to allocate from.
  *
  * kmalloc() for non-local nodes, used to allocate from a specific node
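A quick note on the SLOB constants added above: KMALLOC_SHIFT_MAX of 30 caps the largest kmalloc() request at 1 GiB, and KMALLOC_SHIFT_HIGH of PAGE_SHIFT means page-size and larger requests go straight to the page allocator. A small userspace sketch of that arithmetic, assuming 4 KiB pages (PAGE_SHIFT == 12):

#include <stdio.h>

/* Mirrors the SLOB-side constants from the hunk above; PAGE_SHIFT == 12
 * (4 KiB pages) is an assumption made only for this illustration. */
#define PAGE_SHIFT          12
#define KMALLOC_SHIFT_MAX   30
#define KMALLOC_SHIFT_HIGH  PAGE_SHIFT
#define KMALLOC_SHIFT_LOW   3

int main(void)
{
	/* Largest size kmalloc() will ever satisfy. */
	printf("KMALLOC_MAX_SIZE       = %lu\n", 1UL << KMALLOC_SHIFT_MAX);
	/* At or above this, SLOB bypasses its own lists for the page allocator. */
	printf("KMALLOC_MAX_CACHE_SIZE = %lu\n", 1UL << KMALLOC_SHIFT_HIGH);
	/* Smallest kmalloc allocation under SLOB. */
	printf("KMALLOC_MIN_SIZE       = %lu\n", 1UL << KMALLOC_SHIFT_LOW);
	return 0;
}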
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
index f28e14a12e3f..095a5a4a8516 100644
--- a/include/linux/slob_def.h
+++ b/include/linux/slob_def.h
@@ -18,14 +18,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 	return __kmalloc_node(size, flags, node);
 }
 
-/**
- * kmalloc - allocate memory
- * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate (see kcalloc).
- *
- * kmalloc is the normal method of allocating memory
- * in the kernel.
- */
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
 	return __kmalloc_node(size, flags, NUMA_NO_NODE);
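Since the kmalloc() kernel-doc now lives in slab.h, it applies regardless of which allocator is configured. As a caller-side illustration of the interfaces whose documentation this diff touches, here is a minimal hypothetical module snippet; the function names and sizes are invented for the example.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

/* Hypothetical demo module: names and sizes exist only for illustration. */
static int __init slab_doc_demo_init(void)
{
	void *buf;
	unsigned int *table;

	/* kmalloc(): the normal method of allocating memory in the kernel. */
	buf = kmalloc(128, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* kmalloc_array(): kmalloc() for an array, with n * size overflow checking. */
	table = kmalloc_array(64, sizeof(*table), GFP_KERNEL);
	if (!table) {
		kfree(buf);
		return -ENOMEM;
	}

	kfree(table);
	kfree(buf);
	return 0;
}

static void __exit slab_doc_demo_exit(void)
{
}

module_init(slab_doc_demo_init);
module_exit(slab_doc_demo_exit);
MODULE_LICENSE("GPL");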