aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/slab.h
diff options
context:
space:
mode:
author: Mel Gorman <mel@csn.ul.ie> 2007-10-16 04:25:52 -0400
committer: Linus Torvalds <torvalds@woody.linux-foundation.org> 2007-10-16 12:43:00 -0400
commit: e12ba74d8ff3e2f73a583500d7095e406df4d093 (patch)
tree: a0d3385b65f0b3e1e00b0bbf11b75e7538a93edb /include/linux/slab.h
parent: c361be55b3128474aa66d31092db330b07539103 (diff)
Group short-lived and reclaimable kernel allocations
This patch marks a number of allocations that are either short-lived, such as network buffers, or are reclaimable, such as inode allocations. When something like updatedb is called, long-lived and unmovable kernel allocations tend to be spread throughout the address space, which increases fragmentation.

This patch groups these allocations together as much as possible by adding a new MIGRATE_TYPE. The MIGRATE_RECLAIMABLE type is for allocations that can be reclaimed on demand, but not moved; i.e. they can be migrated by deleting them and re-reading the information from elsewhere.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux/slab.h')
-rw-r--r--include/linux/slab.h4
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index d859354b9e51..3a5bad3ad126 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -24,12 +24,14 @@
 #define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
 #define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
 #define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
-#define SLAB_RECLAIM_ACCOUNT	0x00020000UL		/* Objects are reclaimable */
 #define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
 #define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
 #define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
 #define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */
 
+/* The following flags affect the page allocator grouping pages by mobility */
+#define SLAB_RECLAIM_ACCOUNT	0x00020000UL		/* Objects are reclaimable */
+#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
 /*
  * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
  *