path: root/mm/memory.c
author    Nick Piggin <npiggin@suse.de>  2007-07-19 04:47:05 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-19 13:04:41 -0400
commit    83c54070ee1a2d05c89793884bea1a03f2851ed4 (patch)
tree      dc732f5a9b93fb7004ed23f551bd98b77cc580e0 /mm/memory.c
parent    d0217ac04ca6591841e5665f518e38064f4e65bd (diff)
mm: fault feedback #2
This patch completes Linus's wish that the fault return codes be made into
bit flags, which I agree makes everything nicer.  This requires all
handle_mm_fault callers to be modified (possibly the modifications should
go further and do things like fault accounting in handle_mm_fault --
however that would be for another patch).

[akpm@linux-foundation.org: fix alpha build]
[akpm@linux-foundation.org: fix s390 build]
[akpm@linux-foundation.org: fix sparc build]
[akpm@linux-foundation.org: fix sparc64 build]
[akpm@linux-foundation.org: fix ia64 build]
Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Ian Molton <spyro@f2s.com>
Cc: Bryan Wu <bryan.wu@analog.com>
Cc: Mikael Starvik <starvik@axis.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Hirokazu Takata <takata@linux-m32r.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Roman Zippel <zippel@linux-m68k.org>
Cc: Greg Ungerer <gerg@uclinux.org>
Cc: Matthew Wilcox <willy@debian.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Kazumoto Kojima <kkojima@rr.iij4u.or.jp>
Cc: Richard Curnow <rc@rc0.org.uk>
Cc: William Lee Irwin III <wli@holomorphy.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
Cc: Miles Bader <uclinux-v850@lsi.nec.co.jp>
Cc: Chris Zankel <chris@zankel.net>
Acked-by: Kyle McMartin <kyle@mcmartin.ca>
Acked-by: Haavard Skinnemoen <hskinnemoen@atmel.com>
Acked-by: Ralf Baechle <ralf@linux-mips.org>
Acked-by: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
[ Still apparently needs some ARM and PPC loving - Linus ]
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c  80
1 file changed, 40 insertions(+), 40 deletions(-)
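To make the hunks below easier to follow, here is a minimal sketch (not part of the patch; the constant values and helper function are illustrative only, the real definitions live in include/linux/mm.h) contrasting the old exclusive return codes, which callers had to switch() on, with the new bit-flag scheme this commit adopts, where zero means a plain minor fault and independent bits are tested with '&':

/*
 * Illustrative userspace sketch only -- not part of this patch.
 * account_fault() is a hypothetical helper shaped like the new
 * get_user_pages() code in the first hunk below.
 */
#include <stdio.h>

#define VM_FAULT_OOM	0x0001
#define VM_FAULT_SIGBUS	0x0002
#define VM_FAULT_MAJOR	0x0004
#define VM_FAULT_WRITE	0x0008	/* do_wp_page broke COW */
#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS)

static int account_fault(int ret, int *min_flt, int *maj_flt)
{
	if (ret & VM_FAULT_ERROR)		/* any error bit set? */
		return (ret & VM_FAULT_OOM) ? -1 : -2;
	if (ret & VM_FAULT_MAJOR)		/* I/O was required */
		(*maj_flt)++;
	else					/* minor fault; ret may be 0 */
		(*min_flt)++;
	return 0;
}

int main(void)
{
	int min_flt = 0, maj_flt = 0;

	/* Two flags at once -- impossible with the old exclusive codes. */
	account_fault(VM_FAULT_MAJOR | VM_FAULT_WRITE, &min_flt, &maj_flt);
	account_fault(0, &min_flt, &maj_flt);	/* 0 replaces VM_FAULT_MINOR */
	printf("min=%d maj=%d\n", min_flt, maj_flt);
	return 0;
}

The payoff is visible in the first hunk: the old switch (ret & ~VM_FAULT_WRITE) had to mask out the one "extra" bit by hand, whereas the bit-flag style lets any combination of results travel in one return value.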
diff --git a/mm/memory.c b/mm/memory.c
index 23c870479b3e..61d51da7e17c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1068,31 +1068,30 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 			cond_resched();
 			while (!(page = follow_page(vma, start, foll_flags))) {
 				int ret;
-				ret = __handle_mm_fault(mm, vma, start,
+				ret = handle_mm_fault(mm, vma, start,
 						foll_flags & FOLL_WRITE);
+				if (ret & VM_FAULT_ERROR) {
+					if (ret & VM_FAULT_OOM)
+						return i ? i : -ENOMEM;
+					else if (ret & VM_FAULT_SIGBUS)
+						return i ? i : -EFAULT;
+					BUG();
+				}
+				if (ret & VM_FAULT_MAJOR)
+					tsk->maj_flt++;
+				else
+					tsk->min_flt++;
+
 				/*
-				 * The VM_FAULT_WRITE bit tells us that do_wp_page has
-				 * broken COW when necessary, even if maybe_mkwrite
-				 * decided not to set pte_write. We can thus safely do
-				 * subsequent page lookups as if they were reads.
+				 * The VM_FAULT_WRITE bit tells us that
+				 * do_wp_page has broken COW when necessary,
+				 * even if maybe_mkwrite decided not to set
+				 * pte_write. We can thus safely do subsequent
+				 * page lookups as if they were reads.
 				 */
 				if (ret & VM_FAULT_WRITE)
 					foll_flags &= ~FOLL_WRITE;
 
-				switch (ret & ~VM_FAULT_WRITE) {
-				case VM_FAULT_MINOR:
-					tsk->min_flt++;
-					break;
-				case VM_FAULT_MAJOR:
-					tsk->maj_flt++;
-					break;
-				case VM_FAULT_SIGBUS:
-					return i ? i : -EFAULT;
-				case VM_FAULT_OOM:
-					return i ? i : -ENOMEM;
-				default:
-					BUG();
-				}
 				cond_resched();
 			}
 			if (pages) {
@@ -1639,7 +1638,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 {
 	struct page *old_page, *new_page;
 	pte_t entry;
-	int reuse = 0, ret = VM_FAULT_MINOR;
+	int reuse = 0, ret = 0;
 	struct page *dirty_page = NULL;
 
 	old_page = vm_normal_page(vma, address, orig_pte);
@@ -1835,8 +1834,8 @@ static int unmap_mapping_range_vma(struct vm_area_struct *vma,
 	/*
 	 * files that support invalidating or truncating portions of the
 	 * file from under mmaped areas must have their ->fault function
-	 * return a locked page (and FAULT_RET_LOCKED code). This provides
-	 * synchronisation against concurrent unmapping here.
+	 * return a locked page (and set VM_FAULT_LOCKED in the return).
+	 * This provides synchronisation against concurrent unmapping here.
 	 */
 
 again:
@@ -2140,7 +2139,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct page *page;
 	swp_entry_t entry;
 	pte_t pte;
-	int ret = VM_FAULT_MINOR;
+	int ret = 0;
 
 	if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
 		goto out;
@@ -2208,8 +2207,9 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	unlock_page(page);
 
 	if (write_access) {
+		/* XXX: We could OR the do_wp_page code with this one? */
 		if (do_wp_page(mm, vma, address,
-				page_table, pmd, ptl, pte) == VM_FAULT_OOM)
+				page_table, pmd, ptl, pte) & VM_FAULT_OOM)
 			ret = VM_FAULT_OOM;
 		goto out;
 	}
@@ -2280,7 +2280,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	lazy_mmu_prot_update(entry);
 unlock:
 	pte_unmap_unlock(page_table, ptl);
-	return VM_FAULT_MINOR;
+	return 0;
 release:
 	page_cache_release(page);
 	goto unlock;
@@ -2323,11 +2323,11 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	if (likely(vma->vm_ops->fault)) {
 		ret = vma->vm_ops->fault(vma, &vmf);
-		if (unlikely(ret & (VM_FAULT_ERROR | FAULT_RET_NOPAGE)))
-			return (ret & VM_FAULT_MASK);
+		if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
+			return ret;
 	} else {
 		/* Legacy ->nopage path */
-		ret = VM_FAULT_MINOR;
+		ret = 0;
 		vmf.page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret);
 		/* no page was available -- either SIGBUS or OOM */
 		if (unlikely(vmf.page == NOPAGE_SIGBUS))
@@ -2340,7 +2340,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * For consistency in subsequent calls, make the faulted page always
 	 * locked.
 	 */
-	if (unlikely(!(ret & FAULT_RET_LOCKED)))
+	if (unlikely(!(ret & VM_FAULT_LOCKED)))
 		lock_page(vmf.page);
 	else
 		VM_BUG_ON(!PageLocked(vmf.page));
@@ -2356,7 +2356,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			ret = VM_FAULT_OOM;
 			goto out;
 		}
-		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
+		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
+						vma, address);
 		if (!page) {
 			ret = VM_FAULT_OOM;
 			goto out;
@@ -2384,7 +2385,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			 * is better done later.
 			 */
 			if (!page->mapping) {
-				ret = VM_FAULT_MINOR;
+				ret = 0;
 				anon = 1; /* no anon but release vmf.page */
 				goto out;
 			}
@@ -2447,7 +2448,7 @@ out_unlocked:
 		put_page(dirty_page);
 	}
 
-	return (ret & VM_FAULT_MASK);
+	return ret;
 }
 
 static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -2486,7 +2487,6 @@ static noinline int do_no_pfn(struct mm_struct *mm, struct vm_area_struct *vma,
 	spinlock_t *ptl;
 	pte_t entry;
 	unsigned long pfn;
-	int ret = VM_FAULT_MINOR;
 
 	pte_unmap(page_table);
 	BUG_ON(!(vma->vm_flags & VM_PFNMAP));
@@ -2498,7 +2498,7 @@ static noinline int do_no_pfn(struct mm_struct *mm, struct vm_area_struct *vma,
 	else if (unlikely(pfn == NOPFN_SIGBUS))
 		return VM_FAULT_SIGBUS;
 	else if (unlikely(pfn == NOPFN_REFAULT))
-		return VM_FAULT_MINOR;
+		return 0;
 
 	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
 
@@ -2510,7 +2510,7 @@ static noinline int do_no_pfn(struct mm_struct *mm, struct vm_area_struct *vma,
 		set_pte_at(mm, address, page_table, entry);
 	}
 	pte_unmap_unlock(page_table, ptl);
-	return ret;
+	return 0;
 }
 
 /*
@@ -2531,7 +2531,7 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	pgoff_t pgoff;
 
 	if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
-		return VM_FAULT_MINOR;
+		return 0;
 
 	if (unlikely(!(vma->vm_flags & VM_NONLINEAR) ||
 			!(vma->vm_flags & VM_CAN_NONLINEAR))) {
@@ -2615,13 +2615,13 @@ static inline int handle_pte_fault(struct mm_struct *mm,
 	}
 unlock:
 	pte_unmap_unlock(pte, ptl);
-	return VM_FAULT_MINOR;
+	return 0;
 }
 
 /*
  * By the time we get here, we already hold the mm semaphore
  */
-int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long address, int write_access)
 {
 	pgd_t *pgd;
@@ -2650,7 +2650,7 @@ int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
 }
 
-EXPORT_SYMBOL_GPL(__handle_mm_fault);
+EXPORT_SYMBOL_GPL(handle_mm_fault);
 
 #ifndef __PAGETABLE_PUD_FOLDED
 /*