about summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
authorRafael J. Wysocki <rjw@sisk.pl>2006-09-26 02:32:49 -0400
committerLinus Torvalds <torvalds@g5.osdl.org>2006-09-26 11:48:59 -0400
commitf623f0db8e6aa86a37be86167e4ff478821a9f4f (patch)
treefab12e8dc57d14101e9e512ba708b83f74551dd9 /mm
parente3920fb42c8ddfe63befb54d95c0e13eabacea9b (diff)
[PATCH] swsusp: Fix mark_free_pages
Clean up mm/page_alloc.c#mark_free_pages() and make it avoid clearing PageNosaveFree for PageNosave pages. This allows us to get rid of an ugly hack in kernel/power/snapshot.c#copy_data_pages(). Additionally, the page-copying loop in copy_data_pages() is moved to an inline function.

Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Cc: Pavel Machek <pavel@ucw.cz>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c  24
1 file changed, 16 insertions(+), 8 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 51070b6d593f..9810f0a60db7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -694,7 +694,8 @@ static void __drain_pages(unsigned int cpu)
 
 void mark_free_pages(struct zone *zone)
 {
-	unsigned long zone_pfn, flags;
+	unsigned long pfn, max_zone_pfn;
+	unsigned long flags;
 	int order;
 	struct list_head *curr;
 
@@ -702,18 +703,25 @@ void mark_free_pages(struct zone *zone)
 		return;
 
 	spin_lock_irqsave(&zone->lock, flags);
-	for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
-		ClearPageNosaveFree(pfn_to_page(zone_pfn + zone->zone_start_pfn));
+
+	max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
+	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
+		if (pfn_valid(pfn)) {
+			struct page *page = pfn_to_page(pfn);
+
+			if (!PageNosave(page))
+				ClearPageNosaveFree(page);
+		}
 
 	for (order = MAX_ORDER - 1; order >= 0; --order)
 		list_for_each(curr, &zone->free_area[order].free_list) {
-			unsigned long start_pfn, i;
+			unsigned long i;
 
-			start_pfn = page_to_pfn(list_entry(curr, struct page, lru));
+			pfn = page_to_pfn(list_entry(curr, struct page, lru));
+			for (i = 0; i < (1UL << order); i++)
+				SetPageNosaveFree(pfn_to_page(pfn + i));
+		}
 
-			for (i=0; i < (1<<order); i++)
-				SetPageNosaveFree(pfn_to_page(start_pfn+i));
-		}
 	spin_unlock_irqrestore(&zone->lock, flags);
 }
 