aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorChristoph Lameter <clameter@sgi.com>2006-09-26 02:31:48 -0400
committerLinus Torvalds <torvalds@g5.osdl.org>2006-09-26 11:48:51 -0400
commit006d22d9bbb7e66279ba5cc4556b54eeaf8fd556 (patch)
tree5af5a6676af234db8836bb1e3ef71e6cf8ccb0a9 /mm
parent46a82b2d5591335277ed2930611f6acb4ce654ed (diff)
[PATCH] Optimize free_one_page
free_one_page currently adds the page to a fake list and calls free_pages_bulk. free_pages_bulk takes it off again and then calls __free_one_page. Make free_one_page go directly to __free_one_page. Saves the list on/off and a temporary list in free_one_page for higher-order pages. Signed-off-by: Christoph Lameter <clameter@sgi.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/page_alloc.c8
1 files changed, 5 insertions, 3 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e8a71657ac4a..cc6483047567 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -448,9 +448,11 @@ static void free_pages_bulk(struct zone *zone, int count,
448 448
449static void free_one_page(struct zone *zone, struct page *page, int order) 449static void free_one_page(struct zone *zone, struct page *page, int order)
450{ 450{
451 LIST_HEAD(list); 451 spin_lock(&zone->lock);
452 list_add(&page->lru, &list); 452 zone->all_unreclaimable = 0;
453 free_pages_bulk(zone, 1, &list, order); 453 zone->pages_scanned = 0;
454 __free_one_page(page, zone ,order);
455 spin_unlock(&zone->lock);
454} 456}
455 457
456static void __free_pages_ok(struct page *page, unsigned int order) 458static void __free_pages_ok(struct page *page, unsigned int order)