author     Rafael J. Wysocki <rjw@sisk.pl>          2006-09-26 02:32:49 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>    2006-09-26 11:48:59 -0400
commit     f623f0db8e6aa86a37be86167e4ff478821a9f4f
tree       fab12e8dc57d14101e9e512ba708b83f74551dd9
parent     e3920fb42c8ddfe63befb54d95c0e13eabacea9b
[PATCH] swsusp: Fix mark_free_pages
Clean up mm/page_alloc.c#mark_free_pages() and make it avoid clearing
PageNosaveFree for PageNosave pages. This allows us to get rid of an ugly
hack in kernel/power/snapshot.c#copy_data_pages().
Additionally, the page-copying loop in copy_data_pages() is moved to an
inline function.
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Cc: Pavel Machek <pavel@ucw.cz>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--   kernel/power/snapshot.c   27
-rw-r--r--   mm/page_alloc.c           24
2 files changed, 29 insertions, 22 deletions
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 81fe8de9e604..4afb7900002b 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -208,37 +208,36 @@ unsigned int count_data_pages(void)
         return n;
 }
 
+static inline void copy_data_page(long *dst, long *src)
+{
+        int n;
+
+        /* copy_page and memcpy are not usable for copying task structs. */
+        for (n = PAGE_SIZE / sizeof(long); n; n--)
+                *dst++ = *src++;
+}
+
 static void copy_data_pages(struct pbe *pblist)
 {
         struct zone *zone;
         unsigned long pfn, max_zone_pfn;
-        struct pbe *pbe, *p;
+        struct pbe *pbe;
 
         pbe = pblist;
         for_each_zone (zone) {
                 if (is_highmem(zone))
                         continue;
                 mark_free_pages(zone);
-                /* This is necessary for swsusp_free() */
-                for_each_pb_page (p, pblist)
-                        SetPageNosaveFree(virt_to_page(p));
-                for_each_pbe (p, pblist)
-                        SetPageNosaveFree(virt_to_page(p->address));
                 max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
                 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
                         struct page *page = saveable_page(pfn);
 
                         if (page) {
-                                long *src, *dst;
-                                int n;
+                                void *ptr = page_address(page);
 
                                 BUG_ON(!pbe);
-                                pbe->orig_address = (unsigned long)page_address(page);
-                                /* copy_page and memcpy are not usable for copying task structs. */
-                                dst = (long *)pbe->address;
-                                src = (long *)pbe->orig_address;
-                                for (n = PAGE_SIZE / sizeof(long); n; n--)
-                                        *dst++ = *src++;
+                                copy_data_page((void *)pbe->address, ptr);
+                                pbe->orig_address = (unsigned long)ptr;
                                 pbe = pbe->next;
                         }
                 }
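Not part of the commit itself: a small user-space sketch of the word-by-word copy that the patch factors out into copy_data_page(). PAGE_SIZE, the buffers, and main() below are stand-ins for illustration only; on ordinary memory the loop produces the same bytes as memcpy(), and the kernel open-codes it because, as the in-tree comment notes, copy_page and memcpy are not usable for copying task structs.

```c
#include <assert.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096UL   /* stand-in for the kernel's PAGE_SIZE */

/* Same loop shape as the new copy_data_page(): copy one page, one long at a time. */
static void copy_data_page(long *dst, long *src)
{
        int n;

        for (n = PAGE_SIZE / sizeof(long); n; n--)
                *dst++ = *src++;
}

int main(void)
{
        long *src = malloc(PAGE_SIZE);
        long *dst = malloc(PAGE_SIZE);
        long *ref = malloc(PAGE_SIZE);
        size_t i;

        /* Fill the source "page" with an arbitrary test pattern. */
        for (i = 0; i < PAGE_SIZE / sizeof(long); i++)
                src[i] = (long)(i * 2654435761u);

        copy_data_page(dst, src);
        memcpy(ref, src, PAGE_SIZE);

        /* For plain memory the word-by-word loop matches memcpy() exactly. */
        assert(memcmp(dst, ref, PAGE_SIZE) == 0);

        free(src);
        free(dst);
        free(ref);
        return 0;
}
```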
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 51070b6d593f..9810f0a60db7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -694,7 +694,8 @@ static void __drain_pages(unsigned int cpu)
 
 void mark_free_pages(struct zone *zone)
 {
-        unsigned long zone_pfn, flags;
+        unsigned long pfn, max_zone_pfn;
+        unsigned long flags;
         int order;
         struct list_head *curr;
 
@@ -702,18 +703,25 @@ void mark_free_pages(struct zone *zone)
                 return;
 
         spin_lock_irqsave(&zone->lock, flags);
-        for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
-                ClearPageNosaveFree(pfn_to_page(zone_pfn + zone->zone_start_pfn));
+
+        max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
+        for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
+                if (pfn_valid(pfn)) {
+                        struct page *page = pfn_to_page(pfn);
+
+                        if (!PageNosave(page))
+                                ClearPageNosaveFree(page);
+                }
 
         for (order = MAX_ORDER - 1; order >= 0; --order)
                 list_for_each(curr, &zone->free_area[order].free_list) {
-                        unsigned long start_pfn, i;
+                        unsigned long i;
 
-                        start_pfn = page_to_pfn(list_entry(curr, struct page, lru));
+                        pfn = page_to_pfn(list_entry(curr, struct page, lru));
+                        for (i = 0; i < (1UL << order); i++)
+                                SetPageNosaveFree(pfn_to_page(pfn + i));
+                }
 
-                        for (i=0; i < (1<<order); i++)
-                                SetPageNosaveFree(pfn_to_page(start_pfn+i));
-                }
         spin_unlock_irqrestore(&zone->lock, flags);
 }
 
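Also outside the commit: a toy user-space model of the reworked mark_free_pages() flow. Every name here (toy_page, the boolean fields, toy_mark_free_pages) is a made-up stand-in, not a kernel API; it only shows the ordering the patch establishes: clear the nosave-free mark on all valid pages except those already marked nosave, then set it on every page found on the free lists. Leaving PageNosave pages alone is what lets copy_data_pages() drop its re-marking hack.

```c
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NR_PAGES 16   /* toy "zone" of 16 page frames */

/* Stand-ins for the kernel's PG_nosave / PG_nosave_free page flags. */
struct toy_page {
        bool nosave;        /* PageNosave():     page must not be saved        */
        bool nosave_free;   /* PageNosaveFree(): page is free, skip the copy   */
        bool on_free_list;  /* stand-in for sitting on a buddy free list       */
};

/* Mirrors the patched mark_free_pages() logic:
 * 1) clear the "free" mark on every page, but leave nosave pages untouched;
 * 2) set the "free" mark on every page found on the free lists. */
static void toy_mark_free_pages(struct toy_page *zone, int n)
{
        int i;

        for (i = 0; i < n; i++)
                if (!zone[i].nosave)
                        zone[i].nosave_free = false;

        for (i = 0; i < n; i++)
                if (zone[i].on_free_list)
                        zone[i].nosave_free = true;
}

int main(void)
{
        struct toy_page zone[NR_PAGES];

        memset(zone, 0, sizeof(zone));
        zone[3].nosave = true;          /* e.g. a page used by swsusp itself */
        zone[3].nosave_free = true;     /* ...whose mark must now survive    */
        zone[7].on_free_list = true;    /* an ordinary free page             */

        toy_mark_free_pages(zone, NR_PAGES);

        printf("page 3: nosave_free=%d (preserved)\n", zone[3].nosave_free);
        printf("page 7: nosave_free=%d (set from free list)\n", zone[7].nosave_free);
        return 0;
}
```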
