author      Mel Gorman <mel@csn.ul.ie>                             2007-10-16 04:25:52 -0400
committer   Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-10-16 12:43:00 -0400
commit      e12ba74d8ff3e2f73a583500d7095e406df4d093
tree        a0d3385b65f0b3e1e00b0bbf11b75e7538a93edb /include
parent      c361be55b3128474aa66d31092db330b07539103
Group short-lived and reclaimable kernel allocations
This patch marks a number of allocations that are either short-lived, such as network buffers, or reclaimable, such as inode allocations. When something like updatedb runs, long-lived and unmovable kernel allocations tend to be spread throughout the address space, which increases fragmentation.
This patch groups these allocations together as much as possible by adding a new migrate type. The MIGRATE_RECLAIMABLE type is for allocations that can be reclaimed on demand but not moved, i.e. they can effectively be migrated by deleting them and re-reading the information from elsewhere.
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
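[Editorial note, not part of the commit] The caller-visible side of this change is small: a short-lived allocation asks for the new GFP_TEMPORARY mask and the page allocator then groups its pages with other reclaimable memory. A minimal sketch of such a caller is shown below; the function and buffer names are invented for illustration.

#include <linux/gfp.h>
#include <linux/slab.h>

/* Illustrative only: a short-lived scratch buffer requests GFP_TEMPORARY
 * so its pages are grouped with other reclaimable/short-lived allocations
 * rather than scattered among long-lived unmovable ones. */
static void *example_alloc_scratch(size_t len)
{
	return kmalloc(len, GFP_TEMPORARY);
}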
Diffstat (limited to 'include')
 include/linux/gfp.h             | 15
 include/linux/mmzone.h          |  6
 include/linux/pageblock-flags.h |  2
 include/linux/slab.h            |  4
 4 files changed, 20 insertions(+), 7 deletions(-)
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index da8aa872eb6e..f8ffcd401c5f 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -48,9 +48,10 @@ struct vm_area_struct;
 #define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */
 #define __GFP_HARDWALL   ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */
 #define __GFP_THISNODE	((__force gfp_t)0x40000u)/* No fallback, no policies */
-#define __GFP_MOVABLE	((__force gfp_t)0x80000u) /* Page is movable */
+#define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */
+#define __GFP_MOVABLE	((__force gfp_t)0x100000u) /* Page is movable */
 
-#define __GFP_BITS_SHIFT 20	/* Room for 20 __GFP_FOO bits */
+#define __GFP_BITS_SHIFT 21	/* Room for 21 __GFP_FOO bits */
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /* This equals 0, but use constants in case they ever change */
@@ -60,6 +61,8 @@ struct vm_area_struct;
 #define GFP_NOIO	(__GFP_WAIT)
 #define GFP_NOFS	(__GFP_WAIT | __GFP_IO)
 #define GFP_KERNEL	(__GFP_WAIT | __GFP_IO | __GFP_FS)
+#define GFP_TEMPORARY	(__GFP_WAIT | __GFP_IO | __GFP_FS | \
+			 __GFP_RECLAIMABLE)
 #define GFP_USER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
 #define GFP_HIGHUSER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
 			 __GFP_HIGHMEM)
@@ -80,7 +83,7 @@ struct vm_area_struct;
 #endif
 
 /* This mask makes up all the page movable related flags */
-#define GFP_MOVABLE_MASK (__GFP_MOVABLE)
+#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
 
 /* Control page allocator reclaim behavior */
 #define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
@@ -129,6 +132,12 @@ static inline enum zone_type gfp_zone(gfp_t flags)
 	return base + ZONE_NORMAL;
 }
 
+static inline gfp_t set_migrateflags(gfp_t gfp, gfp_t migrate_flags)
+{
+	BUG_ON((gfp & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
+	return (gfp & ~(GFP_MOVABLE_MASK)) | migrate_flags;
+}
+
 /*
  * There is only one page-allocator function, and two main namespaces to
  * it. The alloc_page*() variants return 'struct page *' and as such
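[Editorial note, not part of the commit] The set_migrateflags() helper added above clears whatever mobility bits a gfp mask already carries and substitutes the caller's choice; the BUG_ON catches a mask that claims to be both reclaimable and movable at once. A hypothetical call site might look like the sketch below; the wrapper function is invented for illustration only.

/* Hypothetical call site: force an allocation to be grouped as
 * reclaimable regardless of the mobility bits in the incoming mask. */
static struct page *example_alloc_reclaimable(gfp_t gfp, unsigned int order)
{
	gfp = set_migrateflags(gfp, __GFP_RECLAIMABLE);
	return alloc_pages(gfp, order);
}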
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 7d7e4fe0fda8..4721e9aa3ced 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -35,10 +35,12 @@
 
 #ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 #define MIGRATE_UNMOVABLE     0
-#define MIGRATE_MOVABLE       1
-#define MIGRATE_TYPES         2
+#define MIGRATE_RECLAIMABLE   1
+#define MIGRATE_MOVABLE       2
+#define MIGRATE_TYPES         3
 #else
 #define MIGRATE_UNMOVABLE     0
+#define MIGRATE_UNRECLAIMABLE 0
 #define MIGRATE_MOVABLE       0
 #define MIGRATE_TYPES         1
 #endif
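[Editorial note, not part of the commit] With three migrate types, the allocator needs a mapping from the two gfp mobility bits to a MIGRATE_* index; that helper is not part of this hunk. A rough long-hand equivalent, written only to show the intended mapping, is sketched below (the function name is an assumption, not the kernel's).

/* Sketch of the gfp-bits -> migrate-type mapping implied by the
 * definitions above; the real helper lives elsewhere in the series. */
static inline int example_gfpflags_to_migratetype(gfp_t gfp_flags)
{
	if (gfp_flags & __GFP_MOVABLE)
		return MIGRATE_MOVABLE;
	if (gfp_flags & __GFP_RECLAIMABLE)
		return MIGRATE_RECLAIMABLE;
	return MIGRATE_UNMOVABLE;
}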
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
index 3619d52a425c..5456da6b4ade 100644
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -31,7 +31,7 @@
 
 /* Bit indices that affect a whole block of pages */
 enum pageblock_bits {
-	PB_range(PB_migrate, 1), /* 1 bit required for migrate types */
+	PB_range(PB_migrate, 2), /* 2 bits required for migrate types */
 	NR_PAGEBLOCK_BITS
 };
 
diff --git a/include/linux/slab.h b/include/linux/slab.h
index d859354b9e51..3a5bad3ad126 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -24,12 +24,14 @@
 #define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
 #define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
 #define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
-#define SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* Objects are reclaimable */
 #define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
 #define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
 #define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
 #define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */
 
+/* The following flags affect the page allocator grouping pages by mobility */
+#define SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* Objects are reclaimable */
+#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
 /*
  * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
  *
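[Editorial note, not part of the commit] On the slab side, a cache whose objects are reclaimable (inodes, dentries) or short-lived passes SLAB_RECLAIM_ACCOUNT, or the new SLAB_TEMPORARY alias, at creation time so that its backing pages land in reclaimable pageblocks. The sketch below shows such a cache; the struct and cache names are invented for illustration.

#include <linux/slab.h>

/* Sketch only: passing SLAB_TEMPORARY (an alias for SLAB_RECLAIM_ACCOUNT)
 * lets the page allocator group this cache's pages with other
 * reclaimable memory. */
struct example_obj {
	int payload;
};

static struct kmem_cache *example_cachep;

static int __init example_cache_init(void)
{
	example_cachep = kmem_cache_create("example_obj_cache",
					   sizeof(struct example_obj), 0,
					   SLAB_TEMPORARY, NULL);
	return example_cachep ? 0 : -ENOMEM;
}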