author		Mel Gorman <mel@csn.ul.ie>	2010-05-24 17:32:27 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-05-25 11:06:59 -0400
commit		748446bb6b5a9390b546af38ec899c868a9dbcf0 (patch)
tree		4c27d0805a5e094b39ff938ad60dd270b953a79f /mm/page_alloc.c
parent		c175a0ce7584e5b498fff8cbdb9aa7912aa9fbba (diff)
mm: compaction: memory compaction core
This patch is the core of a mechanism which compacts memory in a zone by relocating movable pages towards the end of the zone.

A single compaction run involves a migration scanner and a free scanner. Both scanners operate on pageblock-sized areas in the zone. The migration scanner starts at the bottom of the zone and searches for all movable pages within each area, isolating them onto a private list called migratelist. The free scanner starts at the top of the zone, searches for suitable areas and consumes the free pages within them, making them available to the migration scanner. The pages isolated for migration are then migrated to the newly isolated free pages.

[aarcange@redhat.com: Fix unsafe optimisation]
[mel@csn.ul.ie: do not schedule work on other CPUs for compaction]
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
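As a rough illustration of the two-scanner design described in the commit message, the sketch below models it in plain userspace C. This is an assumption-laden toy, not the kernel implementation: compact_zone_model(), the 'M'/'F'/'U' page markings and the zone array are invented for the example; the real scanners work on pageblock-sized areas, hold zone locks and hand the isolated pages to migrate_pages().

/* Toy model: 'M' = movable page, 'F' = free page, 'U' = unmovable page.
 * A migration scanner walks up from the bottom of the zone looking for
 * movable pages while a free scanner walks down from the top looking for
 * free pages; each movable page found is "migrated" into a free slot
 * until the two scanners meet. */
#include <stdio.h>
#include <string.h>

static void compact_zone_model(char *zone, size_t nr_pages)
{
	size_t migrate_pfn = 0;		/* migration scanner: bottom of zone, moving up */
	size_t free_pfn = nr_pages;	/* free scanner: top of zone, moving down */

	while (migrate_pfn < free_pfn) {
		if (zone[migrate_pfn] != 'M') {	/* only movable pages are isolated */
			migrate_pfn++;
			continue;
		}
		/* the free scanner searches downwards for a free page to migrate into */
		while (free_pfn > migrate_pfn && zone[free_pfn - 1] != 'F')
			free_pfn--;
		if (free_pfn <= migrate_pfn)
			break;		/* scanners met: this compaction run is done */
		/* "migrate" the page towards the end of the zone; its old slot becomes free */
		zone[free_pfn - 1] = 'M';
		zone[migrate_pfn] = 'F';
		free_pfn--;
		migrate_pfn++;
	}
}

int main(void)
{
	char zone[] = "MFUMFFMUFM";

	compact_zone_model(zone, strlen(zone));
	printf("%s\n", zone);	/* movable pages end up towards the end of the zone */
	return 0;
}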
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	45
1 files changed, 45 insertions(+), 0 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cefe6fe8d991..c54376a09f30 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1208,6 +1208,51 @@ void split_page(struct page *page, unsigned int order)
 }
 
 /*
+ * Similar to split_page except the page is already free. As this is only
+ * being used for migration, the migratetype of the block also changes.
+ * As this is called with interrupts disabled, the caller is responsible
+ * for calling arch_alloc_page() and kernel_map_pages() after interrupts
+ * are enabled.
+ *
+ * Note: this is probably too low level an operation for use in drivers.
+ * Please consult with lkml before using this in your driver.
+ */
+int split_free_page(struct page *page)
+{
+	unsigned int order;
+	unsigned long watermark;
+	struct zone *zone;
+
+	BUG_ON(!PageBuddy(page));
+
+	zone = page_zone(page);
+	order = page_order(page);
+
+	/* Obey watermarks as if the page was being allocated */
+	watermark = low_wmark_pages(zone) + (1 << order);
+	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
+		return 0;
+
+	/* Remove page from free list */
+	list_del(&page->lru);
+	zone->free_area[order].nr_free--;
+	rmv_page_order(page);
+	__mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order));
+
+	/* Split into individual pages */
+	set_page_refcounted(page);
+	split_page(page, order);
+
+	if (order >= pageblock_order - 1) {
+		struct page *endpage = page + (1 << order) - 1;
+		for (; page < endpage; page += pageblock_nr_pages)
+			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+	}
+
+	return 1 << order;
+}
+
+/*
  * Really, prep_compound_page() should be called from __rmqueue_bulk(). But
  * we cheat by calling it from here, in the order > 0 path. Saves a branch
  * or two.
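The watermark test at the top of split_free_page() above can be illustrated with a small worked example. The sketch below is a simplified userspace model, not kernel code: would_pass_watermark() is an invented name, and it ignores the lowmem reserves and per-order free-list checks that the real zone_watermark_ok() performs; it only shows the requirement that the zone retain at least low_wmark_pages(zone) + (1 << order) free pages before an order-N buddy page is split off.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical model of the check: splitting is allowed only if the zone
 * still meets its low watermark once the 1 << order pages are removed. */
static bool would_pass_watermark(unsigned long free_pages,
				 unsigned long low_wmark,
				 unsigned int order)
{
	unsigned long watermark = low_wmark + (1UL << order);

	return free_pages >= watermark;
}

int main(void)
{
	/* e.g. a low watermark of 256 pages, splitting an order-3 (8-page) block */
	printf("%d\n", would_pass_watermark(300, 256, 3));	/* 1: enough headroom */
	printf("%d\n", would_pass_watermark(260, 256, 3));	/* 0: would dip below the mark */
	return 0;
}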