author    Mel Gorman <mel@csn.ul.ie>  2007-10-16 04:25:52 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-16 12:43:00 -0400
commit    e12ba74d8ff3e2f73a583500d7095e406df4d093 (patch)
tree      a0d3385b65f0b3e1e00b0bbf11b75e7538a93edb /mm
parent    c361be55b3128474aa66d31092db330b07539103 (diff)
Group short-lived and reclaimable kernel allocations
This patch marks a number of allocations that are either short-lived, such as
network buffers, or reclaimable, such as inode allocations. When something like
updatedb is run, long-lived and unmovable kernel allocations tend to be spread
throughout the address space, which increases fragmentation.

This patch groups these allocations together as much as possible by adding a
new migrate type. The MIGRATE_RECLAIMABLE type is for allocations that can be
reclaimed on demand, but not moved; i.e. they can be "migrated" by deleting
them and re-reading the information from elsewhere.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
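The core of the patch is a two-bit encoding of allocation mobility: __GFP_MOVABLE selects bit 1 and __GFP_RECLAIMABLE bit 0, so the three migrate types come out as 0 (unmovable), 1 (reclaimable) and 2 (movable). A minimal userspace sketch of that mapping follows; the flag values are illustrative stand-ins, not the kernel's actual gfp.h definitions.

#include <stdio.h>

/* Illustrative stand-ins for the kernel's gfp.h flag bits. */
#define __GFP_RECLAIMABLE 0x80000u  /* reclaimable on demand, not movable */
#define __GFP_MOVABLE     0x100000u /* movable by page migration */
#define GFP_MOVABLE_MASK  (__GFP_RECLAIMABLE | __GFP_MOVABLE)

enum {
	MIGRATE_UNMOVABLE,   /* 0: neither flag set */
	MIGRATE_RECLAIMABLE, /* 1: __GFP_RECLAIMABLE set */
	MIGRATE_MOVABLE,     /* 2: __GFP_MOVABLE set */
	MIGRATE_TYPES
};

/* Mirrors the new gfpflags_to_migratetype(): movable -> bit 1, reclaimable -> bit 0. */
static int gfpflags_to_migratetype(unsigned int gfp_flags)
{
	return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
		((gfp_flags & __GFP_RECLAIMABLE) != 0);
}

int main(void)
{
	printf("%d\n", gfpflags_to_migratetype(0));                 /* 0: unmovable */
	printf("%d\n", gfpflags_to_migratetype(__GFP_RECLAIMABLE)); /* 1: reclaimable */
	printf("%d\n", gfpflags_to_migratetype(__GFP_MOVABLE));     /* 2: movable */
	return 0;
}

Setting both flags would map to 3, one past the last valid type, which is why the page_alloc.c hunk below warns when (gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK.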
Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c | 10
-rw-r--r--  mm/shmem.c      |  4
-rw-r--r--  mm/slab.c       |  2
-rw-r--r--  mm/slub.c       |  3
4 files changed, 14 insertions, 5 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d575a3ee8dd8..29f4de1423c9 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -172,7 +172,10 @@ static void set_pageblock_migratetype(struct page *page, int migratetype)
 
 static inline int gfpflags_to_migratetype(gfp_t gfp_flags)
 {
-	return ((gfp_flags & __GFP_MOVABLE) != 0);
+	WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
+
+	return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
+		((gfp_flags & __GFP_RECLAIMABLE) != 0);
 }
 
 #else
@@ -676,8 +679,9 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
  * the free lists for the desirable migrate type are depleted
  */
 static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
-	[MIGRATE_UNMOVABLE] = { MIGRATE_MOVABLE },
-	[MIGRATE_MOVABLE]   = { MIGRATE_UNMOVABLE },
+	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE },
+	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE },
+	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE },
 };
 
 /*
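The fallbacks table above lists, for each migrate type, the order in which the other free lists are raided when the preferred list is empty; reclaimable allocations now spill into unmovable pageblocks before movable ones, keeping them out of the movable region. Below is a simplified, self-contained model of that lookup; it sketches the idea only, not the kernel's actual __rmqueue logic, and nr_free is a toy stand-in for the per-zone free lists.

#include <stdio.h>

enum { MIGRATE_UNMOVABLE, MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_TYPES };

static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES - 1] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE },
};

/* Toy free-page counts per migrate type, standing in for zone free lists. */
static int nr_free[MIGRATE_TYPES] = { 0, 4, 12 };

/* Return the preferred type if it has free pages, else the first fallback that does. */
static int pick_migratetype(int preferred)
{
	int i;

	if (nr_free[preferred] > 0)
		return preferred;
	for (i = 0; i < MIGRATE_TYPES - 1; i++)
		if (nr_free[fallbacks[preferred][i]] > 0)
			return fallbacks[preferred][i];
	return -1; /* nothing free */
}

int main(void)
{
	/* The unmovable list is empty here, so this falls back to MIGRATE_RECLAIMABLE (1). */
	printf("%d\n", pick_migratetype(MIGRATE_UNMOVABLE));
	return 0;
}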
diff --git a/mm/shmem.c b/mm/shmem.c
index 855b93b3637c..76ecbac0d55b 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -95,9 +95,9 @@ static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
 	 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
 	 * might be reconsidered if it ever diverges from PAGE_SIZE.
 	 *
-	 * __GFP_MOVABLE is masked out as swap vectors cannot move
+	 * Mobility flags are masked out as swap vectors cannot move
 	 */
-	return alloc_pages((gfp_mask & ~__GFP_MOVABLE) | __GFP_ZERO,
+	return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO,
 				PAGE_CACHE_SHIFT-PAGE_SHIFT);
 }
 
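shmem's directory pages index swap entries and must never move, so the allocation now strips the whole GFP_MOVABLE_MASK (both mobility bits) instead of just __GFP_MOVABLE. A tiny sketch of that masking, again with illustrative flag values rather than the kernel's:

#include <stdio.h>

#define __GFP_RECLAIMABLE 0x80000u
#define __GFP_MOVABLE     0x100000u
#define __GFP_ZERO        0x8000u
#define GFP_MOVABLE_MASK  (__GFP_RECLAIMABLE | __GFP_MOVABLE)

int main(void)
{
	/* The caller's mask may carry either mobility hint... */
	unsigned int gfp_mask = __GFP_MOVABLE | 0x10u;

	/* ...but shmem_dir_alloc() clears both and asks for zeroed pages. */
	unsigned int effective = (gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO;

	printf("%#x -> %#x\n", gfp_mask, effective); /* 0x100010 -> 0x8010 */
	return 0;
}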
diff --git a/mm/slab.c b/mm/slab.c
index 8fb56ae685de..e34bcb87a6ee 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1643,6 +1643,8 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 #endif
 
 	flags |= cachep->gfpflags;
+	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
+		flags |= __GFP_RECLAIMABLE;
 
 	page = alloc_pages_node(nodeid, flags, cachep->gfporder);
 	if (!page)
diff --git a/mm/slub.c b/mm/slub.c
index 19d3202ca2dc..a90c4ffc9576 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1055,6 +1055,9 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	if (s->flags & SLAB_CACHE_DMA)
 		flags |= SLUB_DMA;
 
+	if (s->flags & SLAB_RECLAIM_ACCOUNT)
+		flags |= __GFP_RECLAIMABLE;
+
 	if (node == -1)
 		page = alloc_pages(flags, s->order);
 	else
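The slab.c and slub.c hunks apply the same rule: caches created with SLAB_RECLAIM_ACCOUNT (inode and dentry caches, for example) get their backing pages tagged __GFP_RECLAIMABLE, so the page allocator groups them into reclaimable pageblocks. A compact sketch of that cache-flag-to-GFP translation, with illustrative constants and a pared-down stand-in for struct kmem_cache:

#include <stdio.h>

#define SLAB_RECLAIM_ACCOUNT 0x20000u /* illustrative: cache contents are reclaimable */
#define __GFP_RECLAIMABLE    0x80000u /* illustrative gfp.h bit */

/* Pared-down stand-in for struct kmem_cache. */
struct cache {
	unsigned int flags; /* cache creation flags */
};

/* Mirrors both hunks: propagate the cache-level hint into per-allocation GFP flags. */
static unsigned int page_flags_for(const struct cache *s, unsigned int gfp_flags)
{
	if (s->flags & SLAB_RECLAIM_ACCOUNT)
		gfp_flags |= __GFP_RECLAIMABLE;
	return gfp_flags;
}

int main(void)
{
	struct cache inode_cache = { .flags = SLAB_RECLAIM_ACCOUNT };
	struct cache plain_cache = { .flags = 0 };

	printf("%#x\n", page_flags_for(&inode_cache, 0)); /* 0x80000: reclaimable */
	printf("%#x\n", page_flags_for(&plain_cache, 0)); /* 0: unmovable */
	return 0;
}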