author    Nick Piggin <nickpiggin@yahoo.com.au>    2005-10-29 21:16:12 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>   2005-10-30 00:40:39 -0400
commit    b5810039a54e5babf428e9a1e89fc1940fabff11 (patch)
tree      835836cb527ec9bd525f93eb7e016f3dfb8c8ae2 /kernel/power/swsusp.c
parent    f9c98d0287de42221c624482fd4f8d485c98ab22 (diff)
[PATCH] core remove PageReserved
Remove PageReserved() calls from core code by tightening VM_RESERVED
handling in mm/ to cover PageReserved functionality. PageReserved special
casing is removed from get_page and put_page.

All setting and clearing of PageReserved is retained, and it is now flagged
in the page_alloc checks to help ensure we don't introduce any refcount-based
freeing of Reserved pages.

MAP_PRIVATE, PROT_WRITE of VM_RESERVED regions is tentatively being
deprecated. We never completely handled it correctly anyway, and it can be
reintroduced in future if required (Hugh has a proof of concept).

Once PageReserved() calls are removed from kernel/power/swsusp.c, and from
all arch/ and driver code, the Set and Clear calls, and the PG_reserved bit,
can be trivially removed.

The last real user of PageReserved is swsusp, which uses PageReserved to
determine whether a struct page points to valid memory or not. This still
needs to be addressed (a generic page_is_ram() should work).

A last caveat: the ZERO_PAGE is now refcounted and managed with rmap (and
thus mapcounted and counted towards shared rss). These writes to the struct
page could cause excessive cacheline bouncing on big systems. There are a
number of ways this could be addressed if it is an issue.

Signed-off-by: Nick Piggin <npiggin@suse.de>

Refcount bug fix for filemap_xip.c

Signed-off-by: Carsten Otte <cotte@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
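For illustration, a minimal sketch of the generic page_is_ram() the message
calls for, assuming an i386-style e820 firmware memory map (e820, E820_RAM,
PAGE_SIZE and PAGE_SHIFT as defined in i386 kernels of this era); this is a
hypothetical sketch, not code from the patch:

/*
 * Hypothetical sketch, not part of this patch: report whether a pfn
 * lies in a usable-RAM region of the firmware memory map. Assumes
 * the i386 e820 table of this era.
 */
static int page_is_ram(unsigned long pagenr)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		unsigned long start, end;

		if (e820.map[i].type != E820_RAM)	/* skip non-RAM regions */
			continue;
		/* count only pages that lie entirely inside the region */
		start = (e820.map[i].addr + PAGE_SIZE - 1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;
		if (pagenr >= start && pagenr < end)
			return 1;
	}
	return 0;
}

With a helper like this, swsusp could ask "does this pfn point at valid
memory?" directly instead of inferring it from PageReserved.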
Diffstat (limited to 'kernel/power/swsusp.c')
-rw-r--r--	kernel/power/swsusp.c	25
1 file changed, 16 insertions(+), 9 deletions(-)
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 10bc5ec496d7..016504ccfccf 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -578,15 +578,23 @@ static int save_highmem_zone(struct zone *zone)
 			continue;
 		page = pfn_to_page(pfn);
 		/*
-		 * This condition results from rvmalloc() sans vmalloc_32()
-		 * and architectural memory reservations. This should be
-		 * corrected eventually when the cases giving rise to this
-		 * are better understood.
+		 * PageReserved results from rvmalloc() sans vmalloc_32()
+		 * and architectural memory reservations.
+		 *
+		 * rvmalloc should not cause this, because all implementations
+		 * appear to always be using vmalloc_32 on architectures with
+		 * highmem. This is a good thing, because we would like to save
+		 * rvmalloc pages.
+		 *
+		 * It appears to be triggered by pages which do not point to
+		 * valid memory (see arch/i386/mm/init.c:one_highpage_init(),
+		 * which sets PageReserved if the page does not point to valid
+		 * RAM.
+		 *
+		 * XXX: must remove usage of PageReserved!
 		 */
-		if (PageReserved(page)) {
-			printk("highmem reserved page?!\n");
+		if (PageReserved(page))
 			continue;
-		}
 		BUG_ON(PageNosave(page));
 		if (PageNosaveFree(page))
 			continue;
@@ -672,10 +680,9 @@ static int saveable(struct zone * zone, unsigned long * zone_pfn)
 		return 0;
 
 	page = pfn_to_page(pfn);
-	BUG_ON(PageReserved(page) && PageNosave(page));
 	if (PageNosave(page))
 		return 0;
-	if (PageReserved(page) && pfn_is_nosave(pfn)) {
+	if (pfn_is_nosave(pfn)) {
 		pr_debug("[nosave pfn 0x%lx]", pfn);
 		return 0;
 	}
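The new comment in save_highmem_zone() points at
arch/i386/mm/init.c:one_highpage_init() as the source of these reserved
highmem pages. For reference, that function in kernels of this era read
roughly as follows (reconstructed from the 2.6.14-era tree; treat it as
illustrative rather than verbatim):

void __init one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
	if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
		/* usable highmem: hand the page to the page allocator */
		ClearPageReserved(page);
		set_bit(PG_highmem, &page->flags);
		set_page_count(page, 1);
		__free_page(page);
		totalhigh_pages++;
	} else
		/* pfn does not point at valid RAM: keep it reserved */
		SetPageReserved(page);
}

This is why swsusp can (for now) use PageReserved to skip highmem pfns that
do not point at valid RAM, and why the XXX note asks for that dependency to
be removed.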