author    Andy Whitcroft <apw@shadowen.org>    2007-07-17 07:03:16 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-07-17 13:22:59 -0400
commit    5ad333eb66ff1e52a87639822ae088577669dcf9 (patch)
tree      addae6bbd19585f19328f309924d06d647e8f2b7 /include/linux
parent    7e63efef857575320fb413fbc3d0ee704b72845f (diff)
Lumpy Reclaim V4
When we are out of memory of a suitable size we enter reclaim. The current reclaim algorithm targets pages in LRU order, which is great for fairness at order-0 but highly unsuitable if you desire pages at higher orders. To get pages of higher order we must shoot down a very high proportion of memory; >95% in a lot of cases. This patch set adds a lumpy reclaim algorithm to the allocator. It targets groups of pages at the specified order anchored at the end of the active and inactive lists. This encourages groups of pages at the requested orders to move from active to inactive, and active to free lists. This behaviour is only triggered out of direct reclaim when higher order pages have been requested.

This patch set is particularly effective when used with an anti-fragmentation scheme which groups pages of similar reclaimability together. It is based on Peter Zijlstra's lumpy reclaim V2 patch, which forms the foundation. Credit to Mel Gorman for sanity checking.

Mel said:

    The patches have an application with hugepage pool resizing. When lumpy-reclaim is used with ZONE_MOVABLE, the hugepage pool can be resized with greater reliability. Testing on a desktop machine with 2GB of RAM showed that growing the hugepage pool with ZONE_MOVABLE on its own was very slow, as the success rate was quite low. Without lumpy-reclaim, each attempt to grow the pool by 100 pages would yield 1 or 2 hugepages. With lumpy-reclaim, getting 40 to 70 hugepages on each attempt was typical.

[akpm@osdl.org: ia64 pfn_to_nid fixes and loop cleanup]
[bunk@stusta.de: static declarations for internal functions]
[a.p.zijlstra@chello.nl: initial lumpy V2 implementation]
Signed-off-by: Andy Whitcroft <apw@shadowen.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Cc: Bob Picco <bob.picco@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
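As an illustration only (a minimal userspace sketch, not the mm/vmscan.c implementation; struct mock_page, reclaim_page and reclaim_lumpy are hypothetical names), the core lumpy step is: take a candidate page from the tail of an LRU list, round its pfn down to the requested order boundary, and try to reclaim every page in that aligned block so a contiguous higher-order page can be freed.

/* Userspace sketch of the lumpy reclaim step (hypothetical, simplified). */
#include <stdio.h>
#include <stdbool.h>

#define NR_PAGES 64UL

struct mock_page {
	unsigned long pfn;
	bool on_lru;		/* still on the (inactive) LRU list */
	bool reclaimed;
};

static struct mock_page pages[NR_PAGES];

/* Reclaim a single page; the real kernel path is far more involved. */
static bool reclaim_page(struct mock_page *page)
{
	if (!page->on_lru)
		return false;
	page->on_lru = false;
	page->reclaimed = true;
	return true;
}

/*
 * Lumpy step: given a page plucked from the LRU tail, walk the whole
 * order-aligned block it belongs to and reclaim its neighbours too,
 * so that a free page of the requested order can be assembled.
 */
static unsigned int reclaim_lumpy(struct mock_page *tail, int order)
{
	unsigned long start = tail->pfn & ~((1UL << order) - 1);
	unsigned long end = start + (1UL << order);
	unsigned long pfn;
	unsigned int freed = 0;

	if (end > NR_PAGES)
		end = NR_PAGES;
	for (pfn = start; pfn < end; pfn++)
		if (reclaim_page(&pages[pfn]))
			freed++;
	return freed;
}

int main(void)
{
	unsigned long pfn;

	for (pfn = 0; pfn < NR_PAGES; pfn++) {
		pages[pfn].pfn = pfn;
		pages[pfn].on_lru = true;
	}

	/* Pretend pfn 37 was the page at the tail of the inactive list. */
	printf("freed %u pages around pfn 37 at order 3\n",
	       reclaim_lumpy(&pages[37], 3));
	return 0;
}

In this toy model an order-3 request reclaims the whole 8-page aligned block containing pfn 37, rather than whichever 8 pages happen to sit at the LRU tail, which is what makes higher-order allocations more likely to succeed.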
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/mmzone.h  8
-rw-r--r--  include/linux/swap.h    3
2 files changed, 10 insertions(+), 1 deletion(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index d71ff763c9df..da8eb8ad9e9b 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -24,6 +24,14 @@
 #endif
 #define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
 
+/*
+ * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
+ * costly to service.  That is between allocation orders which should
+ * coelesce naturally under reasonable reclaim pressure and those which
+ * will not.
+ */
+#define PAGE_ALLOC_COSTLY_ORDER 3
+
 struct free_area {
 	struct list_head free_list;
 	unsigned long nr_free;
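The hunk above introduces PAGE_ALLOC_COSTLY_ORDER as the boundary between orders expected to coalesce under normal reclaim pressure and orders that will not. As a hypothetical illustration of how such a threshold can be consumed (should_retry_alloc below is invented for this sketch, not the kernel's actual page allocator logic):

#define PAGE_ALLOC_COSTLY_ORDER 3

/*
 * Illustrative only: requests at or below the costly order are worth
 * retrying persistently, larger ones should give up after a few tries.
 */
static inline int should_retry_alloc(int order, int retries)
{
	if (order <= PAGE_ALLOC_COSTLY_ORDER)
		return 1;		/* cheap orders: keep trying */
	return retries < 3;		/* costly orders: fail fast */
}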
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 006868881346..665f85f2a3af 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -188,7 +188,8 @@ extern int rotate_reclaimable_page(struct page *page);
 extern void swap_setup(void);
 
 /* linux/mm/vmscan.c */
-extern unsigned long try_to_free_pages(struct zone **, gfp_t);
+extern unsigned long try_to_free_pages(struct zone **zones, int order,
+					gfp_t gfp_mask);
 extern unsigned long shrink_all_memory(unsigned long nr_pages);
 extern int vm_swappiness;
 extern int remove_mapping(struct address_space *mapping, struct page *page);
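The swap.h hunk threads the allocation order into direct reclaim. A simplified caller under the new prototype might look like the fragment below; the kernel types are stubbed out so the fragment stands alone, and reclaim_for_alloc is a hypothetical wrapper, not the real mm/page_alloc.c call site.

/* Stubs standing in for <linux/gfp.h> and <linux/mmzone.h>. */
typedef unsigned int gfp_t;
struct zone;

/* New prototype introduced by this patch: reclaim now knows the order. */
extern unsigned long try_to_free_pages(struct zone **zones, int order,
					gfp_t gfp_mask);

/* Hypothetical wrapper: a failed order-N allocation hands its order to
 * reclaim so the lumpy path can target blocks of that size. */
static unsigned long reclaim_for_alloc(struct zone **zones, int order,
				       gfp_t gfp_mask)
{
	return try_to_free_pages(zones, order, gfp_mask);
}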