author    Nick Piggin <nickpiggin@yahoo.com.au>  2005-10-29 21:16:12 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>  2005-10-30 00:40:39 -0400
commit    b5810039a54e5babf428e9a1e89fc1940fabff11 (patch)
tree      835836cb527ec9bd525f93eb7e016f3dfb8c8ae2 /arch/ppc64/kernel/vdso.c
parent    f9c98d0287de42221c624482fd4f8d485c98ab22 (diff)
[PATCH] core remove PageReserved
Remove PageReserved() calls from core code by tightening VM_RESERVED handling in mm/ to cover PageReserved functionality. PageReserved special casing is removed from get_page and put_page.

All setting and clearing of PageReserved is retained, and it is now flagged in the page_alloc checks to help ensure we don't introduce any refcount based freeing of Reserved pages.

MAP_PRIVATE, PROT_WRITE of VM_RESERVED regions is tentatively being deprecated. We never completely handled it correctly anyway, and it can be reintroduced in the future if required (Hugh has a proof of concept).

Once PageReserved() calls are removed from kernel/power/swsusp.c, and all arch/ and driver code, the Set and Clear calls, and the PG_reserved bit can be trivially removed.

The last real user of PageReserved is swsusp, which uses PageReserved to determine whether a struct page points to valid memory or not. This still needs to be addressed (a generic page_is_ram() should work).

A last caveat: the ZERO_PAGE is now refcounted and managed with rmap (and thus mapcounted and counted towards shared rss). These writes to the struct page could cause excessive cacheline bouncing on big systems. There are a number of ways this could be addressed if it is an issue.

Signed-off-by: Nick Piggin <npiggin@suse.de>

Refcount bug fix for filemap_xip.c
Signed-off-by: Carsten Otte <cotte@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
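For drivers and arch code that currently call SetPageReserved() to keep the core VM's refcounting away from their pages, the replacement convention is the one this patch applies to the ppc64 vDSO below: flag the vma itself with VM_RESERVED and hand out pages through an ordinary, refcounted nopage handler. The sketch below is illustrative only, written against the mm interfaces of this kernel generation; example_buf, example_nopage and example_mmap are hypothetical names, and it assumes a single page-aligned kernel page backs the mapping.

#include <linux/fs.h>
#include <linux/mm.h>
#include <asm/page.h>

static void *example_buf;	/* assumed: one page-aligned kernel page, allocated at init */

/*
 * nopage handler: return a normally refcounted page.  No PageReserved
 * games are needed; the unmap path does the matching put_page().
 */
static struct page *example_nopage(struct vm_area_struct *vma,
				   unsigned long address, int *unused)
{
	struct page *pg;

	/* only one page is backed in this sketch */
	if (address - vma->vm_start >= PAGE_SIZE)
		return NOPAGE_SIGBUS;

	pg = virt_to_page(example_buf);
	get_page(pg);
	return pg;
}

static struct vm_operations_struct example_vmops = {
	.nopage = example_nopage,
};

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	/*
	 * Mark the vma, not the pages: with this patch the VM_RESERVED
	 * flag is what tells mm/ to treat the mapping specially.
	 */
	vma->vm_flags |= VM_RESERVED;
	vma->vm_ops = &example_vmops;
	return 0;
}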
Diffstat (limited to 'arch/ppc64/kernel/vdso.c')
-rw-r--r--  arch/ppc64/kernel/vdso.c | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/arch/ppc64/kernel/vdso.c b/arch/ppc64/kernel/vdso.c
index efa985f05aca..4aacf521e3e4 100644
--- a/arch/ppc64/kernel/vdso.c
+++ b/arch/ppc64/kernel/vdso.c
@@ -176,13 +176,13 @@ static struct page * vdso_vma_nopage(struct vm_area_struct * vma,
 		return NOPAGE_SIGBUS;
 
 	/*
-	 * Last page is systemcfg, special handling here, no get_page() a
-	 * this is a reserved page
+	 * Last page is systemcfg.
 	 */
 	if ((vma->vm_end - address) <= PAGE_SIZE)
-		return virt_to_page(systemcfg);
+		pg = virt_to_page(systemcfg);
+	else
+		pg = virt_to_page(vbase + offset);
 
-	pg = virt_to_page(vbase + offset);
 	get_page(pg);
 	DBG(" ->page count: %d\n", page_count(pg));
 
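Reassembled from the new-side lines of this hunk, the tail of vdso_vma_nopage() now reads as follows; the systemcfg page and the vDSO text pages take the same path, and get_page() is called unconditionally because get_page()/put_page() no longer special-case reserved pages:

	/*
	 * Last page is systemcfg.
	 */
	if ((vma->vm_end - address) <= PAGE_SIZE)
		pg = virt_to_page(systemcfg);
	else
		pg = virt_to_page(vbase + offset);

	get_page(pg);
	DBG(" ->page count: %d\n", page_count(pg));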
@@ -259,7 +259,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int executable_stack)
 	 * gettimeofday will be totally dead. It's fine to use that for setting
 	 * breakpoints in the vDSO code pages though
 	 */
-	vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
+	vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC | VM_RESERVED;
 	vma->vm_flags |= mm->def_flags;
 	vma->vm_page_prot = protection_map[vma->vm_flags & 0x7];
 	vma->vm_ops = &vdso_vmops;
@@ -603,6 +603,8 @@ void __init vdso_init(void)
 		ClearPageReserved(pg);
 		get_page(pg);
 	}
+
+	get_page(virt_to_page(systemcfg));
 }
 
 int in_gate_area_no_task(unsigned long addr)
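Note on the vdso_init() hunk above: the added get_page(virt_to_page(systemcfg)) gives the kernel a long-lived reference of its own to the systemcfg page. With the PageReserved special casing gone from get_page()/put_page(), each mapping that faults the page in through vdso_vma_nopage() now takes and, on teardown, drops an ordinary reference, so the boot-time reference is presumably what keeps the count from ever dropping to zero.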