aboutsummaryrefslogtreecommitdiffstats
path: root/mm/rmap.c
diff options
context:
space:
mode:
authorNick Piggin <nickpiggin@yahoo.com.au>2005-10-29 21:16:12 -0400
committerLinus Torvalds <torvalds@g5.osdl.org>2005-10-30 00:40:39 -0400
commitb5810039a54e5babf428e9a1e89fc1940fabff11 (patch)
tree835836cb527ec9bd525f93eb7e016f3dfb8c8ae2 /mm/rmap.c
parentf9c98d0287de42221c624482fd4f8d485c98ab22 (diff)
[PATCH] core remove PageReserved
Remove PageReserved() calls from core code by tightening VM_RESERVED handling in mm/ to cover PageReserved functionality. PageReserved special casing is removed from get_page and put_page. All setting and clearing of PageReserved is retained, and it is now flagged in the page_alloc checks to help ensure we don't introduce any refcount based freeing of Reserved pages. MAP_PRIVATE, PROT_WRITE of VM_RESERVED regions is tentatively being deprecated. We never completely handled it correctly anyway, and is be reintroduced in future if required (Hugh has a proof of concept). Once PageReserved() calls are removed from kernel/power/swsusp.c, and all arch/ and driver code, the Set and Clear calls, and the PG_reserved bit can be trivially removed. Last real user of PageReserved is swsusp, which uses PageReserved to determine whether a struct page points to valid memory or not. This still needs to be addressed (a generic page_is_ram() should work). A last caveat: the ZERO_PAGE is now refcounted and managed with rmap (and thus mapcounted and count towards shared rss). These writes to the struct page could cause excessive cacheline bouncing on big systems. There are a number of ways this could be addressed if it is an issue. Signed-off-by: Nick Piggin <npiggin@suse.de> Refcount bug fix for filemap_xip.c Signed-off-by: Carsten Otte <cotte@de.ibm.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm/rmap.c')
-rw-r--r--mm/rmap.c14
1 file changed, 4 insertions, 10 deletions
diff --git a/mm/rmap.c b/mm/rmap.c
index 504757624cce..f69d5342ce7f 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -443,8 +443,6 @@ int page_referenced(struct page *page, int is_locked, int ignore_token)
443void page_add_anon_rmap(struct page *page, 443void page_add_anon_rmap(struct page *page,
444 struct vm_area_struct *vma, unsigned long address) 444 struct vm_area_struct *vma, unsigned long address)
445{ 445{
446 BUG_ON(PageReserved(page));
447
448 if (atomic_inc_and_test(&page->_mapcount)) { 446 if (atomic_inc_and_test(&page->_mapcount)) {
449 struct anon_vma *anon_vma = vma->anon_vma; 447 struct anon_vma *anon_vma = vma->anon_vma;
450 448
@@ -468,8 +466,7 @@ void page_add_anon_rmap(struct page *page,
468void page_add_file_rmap(struct page *page) 466void page_add_file_rmap(struct page *page)
469{ 467{
470 BUG_ON(PageAnon(page)); 468 BUG_ON(PageAnon(page));
471 if (!pfn_valid(page_to_pfn(page)) || PageReserved(page)) 469 BUG_ON(!pfn_valid(page_to_pfn(page)));
472 return;
473 470
474 if (atomic_inc_and_test(&page->_mapcount)) 471 if (atomic_inc_and_test(&page->_mapcount))
475 inc_page_state(nr_mapped); 472 inc_page_state(nr_mapped);
@@ -483,8 +480,6 @@ void page_add_file_rmap(struct page *page)
483 */ 480 */
484void page_remove_rmap(struct page *page) 481void page_remove_rmap(struct page *page)
485{ 482{
486 BUG_ON(PageReserved(page));
487
488 if (atomic_add_negative(-1, &page->_mapcount)) { 483 if (atomic_add_negative(-1, &page->_mapcount)) {
489 BUG_ON(page_mapcount(page) < 0); 484 BUG_ON(page_mapcount(page) < 0);
490 /* 485 /*
@@ -640,13 +635,13 @@ static void try_to_unmap_cluster(unsigned long cursor,
640 continue; 635 continue;
641 636
642 pfn = pte_pfn(*pte); 637 pfn = pte_pfn(*pte);
643 if (!pfn_valid(pfn)) 638 if (unlikely(!pfn_valid(pfn))) {
639 print_bad_pte(vma, *pte, address);
644 continue; 640 continue;
641 }
645 642
646 page = pfn_to_page(pfn); 643 page = pfn_to_page(pfn);
647 BUG_ON(PageAnon(page)); 644 BUG_ON(PageAnon(page));
648 if (PageReserved(page))
649 continue;
650 645
651 if (ptep_clear_flush_young(vma, address, pte)) 646 if (ptep_clear_flush_young(vma, address, pte))
652 continue; 647 continue;
@@ -808,7 +803,6 @@ int try_to_unmap(struct page *page)
808{ 803{
809 int ret; 804 int ret;
810 805
811 BUG_ON(PageReserved(page));
812 BUG_ON(!PageLocked(page)); 806 BUG_ON(!PageLocked(page));
813 807
814 if (PageAnon(page)) 808 if (PageAnon(page))