author	Mel Gorman <mel@csn.ul.ie>	2007-12-17 19:20:05 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-12-17 22:28:16 -0500
commit	81eabcbe0b991ddef5216f30ae91c4b226d54b6d (patch)
tree	f444d157d56583e034d3384a0102b5cb22d08743 /mm
parent	8d936626dd00bd47cf574add458fea8a23b79611 (diff)
mm: fix page allocation for larger I/O segments
In some cases the IO subsystem is able to merge requests if the pages are adjacent in physical memory. This was achieved in the allocator by having expand() return pages in physically contiguous order in situations where a large buddy was split. However, list-based anti-fragmentation changed the order pages were returned in, to avoid searching in buffered_rmqueue() for a page of the appropriate migrate type.

This patch restores the behaviour of rmqueue_bulk(), preserving the physical order of pages returned by the allocator without incurring increased search costs for anti-fragmentation.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Mark Lord <mlord@pobox.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
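For illustration only (not part of the commit): a minimal user-space sketch of the list trick the patch applies in rmqueue_bulk(). list_add() always inserts right after the head it is given, so adding every page at a fixed head reverses the order in which __rmqueue()/expand() handed them out; advancing the insertion point to the page just added (the new "list = &page->lru" line) keeps the list in ascending physical order. The struct list_head, list_add() and struct page below are simplified stand-ins, not the kernel definitions.

/*
 * Sketch of the ordering behaviour restored by this patch.
 * Simplified, user-space stand-ins for the kernel list primitives.
 */
#include <stdio.h>
#include <stddef.h>

struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *head)
{
	head->next = head;
	head->prev = head;
}

/* Same semantics as the kernel's list_add(): insert @entry right after @head. */
static void list_add(struct list_head *entry, struct list_head *head)
{
	entry->next = head->next;
	entry->prev = head;
	head->next->prev = entry;
	head->next = entry;
}

/* Simplified stand-in for struct page. */
struct page {
	unsigned long pfn;		/* pretend page frame number */
	struct list_head lru;
};

#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct page pages[4] = {
		{ .pfn = 100 }, { .pfn = 101 }, { .pfn = 102 }, { .pfn = 103 }
	};
	struct list_head head, *list = &head, *pos;
	int i;

	INIT_LIST_HEAD(&head);

	/* Mirrors the patched loop: add the page, then advance the
	 * insertion point so the next page lands behind it. */
	for (i = 0; i < 4; i++) {
		list_add(&pages[i].lru, list);
		list = &pages[i].lru;
	}

	/* Prints "100 101 102 103": ascending, IO-mergeable order.
	 * Dropping the "list = ..." advance would print "103 102 101 100". */
	for (pos = head.next; pos != &head; pos = pos->next)
		printf("%lu ", list_entry(pos, struct page, lru)->pfn);
	printf("\n");

	return 0;
}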
Diffstat (limited to 'mm')
-rw-r--r--	mm/page_alloc.c	11
1 file changed, 11 insertions(+), 0 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b5a58d476c1a..d73bfad1c32f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -847,8 +847,19 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 		struct page *page = __rmqueue(zone, order, migratetype);
 		if (unlikely(page == NULL))
 			break;
+
+		/*
+		 * Split buddy pages returned by expand() are received here
+		 * in physical page order. The page is added to the caller's
+		 * list and the list head then moves forward. From the caller's
+		 * perspective, the linked list is ordered by page number in
+		 * some conditions. This is useful for IO devices that can
+		 * merge IO requests if the physical pages are ordered
+		 * properly.
+		 */
 		list_add(&page->lru, list);
 		set_page_private(page, migratetype);
+		list = &page->lru;
 	}
 	spin_unlock(&zone->lock);
 	return i;