aboutsummaryrefslogtreecommitdiffstats
path: root/mm/msync.c
diff options
context:
space:
mode:
authorNick Piggin <nickpiggin@yahoo.com.au>2005-10-29 21:16:12 -0400
committerLinus Torvalds <torvalds@g5.osdl.org>2005-10-30 00:40:39 -0400
commitb5810039a54e5babf428e9a1e89fc1940fabff11 (patch)
tree835836cb527ec9bd525f93eb7e016f3dfb8c8ae2 /mm/msync.c
parentf9c98d0287de42221c624482fd4f8d485c98ab22 (diff)
[PATCH] core remove PageReserved
Remove PageReserved() calls from core code by tightening VM_RESERVED handling in mm/ to cover PageReserved functionality. PageReserved special casing is removed from get_page and put_page. All setting and clearing of PageReserved is retained, and it is now flagged in the page_alloc checks to help ensure we don't introduce any refcount based freeing of Reserved pages. MAP_PRIVATE, PROT_WRITE of VM_RESERVED regions is tentatively being deprecated. We never completely handled it correctly anyway, and is be reintroduced in future if required (Hugh has a proof of concept). Once PageReserved() calls are removed from kernel/power/swsusp.c, and all arch/ and driver code, the Set and Clear calls, and the PG_reserved bit can be trivially removed. Last real user of PageReserved is swsusp, which uses PageReserved to determine whether a struct page points to valid memory or not. This still needs to be addressed (a generic page_is_ram() should work). A last caveat: the ZERO_PAGE is now refcounted and managed with rmap (and thus mapcounted and count towards shared rss). These writes to the struct page could cause excessive cacheline bouncing on big systems. There are a number of ways this could be addressed if it is an issue. Signed-off-by: Nick Piggin <npiggin@suse.de> Refcount bug fix for filemap_xip.c Signed-off-by: Carsten Otte <cotte@de.ibm.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm/msync.c')
-rw-r--r--mm/msync.c17
1 file changed, 10 insertions, 7 deletions
diff --git a/mm/msync.c b/mm/msync.c
index 3b5f1c521d4b..860395486060 100644
--- a/mm/msync.c
+++ b/mm/msync.c
@@ -25,6 +25,7 @@
25static void msync_pte_range(struct vm_area_struct *vma, pmd_t *pmd, 25static void msync_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
26 unsigned long addr, unsigned long end) 26 unsigned long addr, unsigned long end)
27{ 27{
28 struct mm_struct *mm = vma->vm_mm;
28 pte_t *pte; 29 pte_t *pte;
29 int progress = 0; 30 int progress = 0;
30 31
@@ -37,7 +38,7 @@ again:
37 if (progress >= 64) { 38 if (progress >= 64) {
38 progress = 0; 39 progress = 0;
39 if (need_resched() || 40 if (need_resched() ||
40 need_lockbreak(&vma->vm_mm->page_table_lock)) 41 need_lockbreak(&mm->page_table_lock))
41 break; 42 break;
42 } 43 }
43 progress++; 44 progress++;
@@ -46,11 +47,11 @@ again:
46 if (!pte_maybe_dirty(*pte)) 47 if (!pte_maybe_dirty(*pte))
47 continue; 48 continue;
48 pfn = pte_pfn(*pte); 49 pfn = pte_pfn(*pte);
49 if (!pfn_valid(pfn)) 50 if (unlikely(!pfn_valid(pfn))) {
51 print_bad_pte(vma, *pte, addr);
50 continue; 52 continue;
53 }
51 page = pfn_to_page(pfn); 54 page = pfn_to_page(pfn);
52 if (PageReserved(page))
53 continue;
54 55
55 if (ptep_clear_flush_dirty(vma, addr, pte) || 56 if (ptep_clear_flush_dirty(vma, addr, pte) ||
56 page_test_and_clear_dirty(page)) 57 page_test_and_clear_dirty(page))
@@ -58,7 +59,7 @@ again:
58 progress += 3; 59 progress += 3;
59 } while (pte++, addr += PAGE_SIZE, addr != end); 60 } while (pte++, addr += PAGE_SIZE, addr != end);
60 pte_unmap(pte - 1); 61 pte_unmap(pte - 1);
61 cond_resched_lock(&vma->vm_mm->page_table_lock); 62 cond_resched_lock(&mm->page_table_lock);
62 if (addr != end) 63 if (addr != end)
63 goto again; 64 goto again;
64} 65}
@@ -102,8 +103,10 @@ static void msync_page_range(struct vm_area_struct *vma,
102 103
103 /* For hugepages we can't go walking the page table normally, 104 /* For hugepages we can't go walking the page table normally,
104 * but that's ok, hugetlbfs is memory based, so we don't need 105 * but that's ok, hugetlbfs is memory based, so we don't need
105 * to do anything more on an msync() */ 106 * to do anything more on an msync().
106 if (is_vm_hugetlb_page(vma)) 107 * Can't do anything with VM_RESERVED regions either.
108 */
109 if (vma->vm_flags & (VM_HUGETLB|VM_RESERVED))
107 return; 110 return;
108 111
109 BUG_ON(addr >= end); 112 BUG_ON(addr >= end);