author     Nick Piggin <npiggin@suse.de>                          2007-07-19 04:47:05 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-07-19 13:04:41 -0400
commit     83c54070ee1a2d05c89793884bea1a03f2851ed4
tree       dc732f5a9b93fb7004ed23f551bd98b77cc580e0 /include/linux/mm.h
parent     d0217ac04ca6591841e5665f518e38064f4e65bd
mm: fault feedback #2
This patch completes Linus's wish that the fault return codes be made into
bit flags, which I agree makes everything nicer. This requires all
handle_mm_fault callers to be modified (possibly the modifications should go
further and do things like fault accounting in handle_mm_fault -- however
that would be for another patch). A caller-side sketch of the new bit-flag
convention follows the sign-offs below.
[akpm@linux-foundation.org: fix alpha build]
[akpm@linux-foundation.org: fix s390 build]
[akpm@linux-foundation.org: fix sparc build]
[akpm@linux-foundation.org: fix sparc64 build]
[akpm@linux-foundation.org: fix ia64 build]
Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Ian Molton <spyro@f2s.com>
Cc: Bryan Wu <bryan.wu@analog.com>
Cc: Mikael Starvik <starvik@axis.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Hirokazu Takata <takata@linux-m32r.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Roman Zippel <zippel@linux-m68k.org>
Cc: Greg Ungerer <gerg@uclinux.org>
Cc: Matthew Wilcox <willy@debian.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Kazumoto Kojima <kkojima@rr.iij4u.or.jp>
Cc: Richard Curnow <rc@rc0.org.uk>
Cc: William Lee Irwin III <wli@holomorphy.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
Cc: Miles Bader <uclinux-v850@lsi.nec.co.jp>
Cc: Chris Zankel <chris@zankel.net>
Acked-by: Kyle McMartin <kyle@mcmartin.ca>
Acked-by: Haavard Skinnemoen <hskinnemoen@atmel.com>
Acked-by: Ralf Baechle <ralf@linux-mips.org>
Acked-by: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
[ Still apparently needs some ARM and PPC loving - Linus ]
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
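
For reference, here is a small standalone sketch of the caller-side pattern this
change enables. It is not kernel code and not part of this commit: the VM_FAULT_*
values simply mirror the post-patch defines in the diff below, and classify() is
an invented helper standing in for an arch fault handler's result handling.

#include <stdio.h>

/* Post-patch flag values, copied from include/linux/mm.h below. */
#define VM_FAULT_OOM	0x0001
#define VM_FAULT_SIGBUS	0x0002
#define VM_FAULT_MAJOR	0x0004
#define VM_FAULT_WRITE	0x0008
#define VM_FAULT_NOPAGE	0x0100
#define VM_FAULT_LOCKED	0x0200
#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS)

/* Invented helper: the shape of a handle_mm_fault() call site once the
 * return value is a bitmask rather than an enumerated VM_FAULT_xxx code. */
static void classify(int fault)
{
	if (fault & VM_FAULT_ERROR) {	/* one test covers all error cases */
		if (fault & VM_FAULT_OOM)
			printf("out of memory\n");
		else
			printf("SIGBUS\n");
		return;
	}
	printf("%s fault\n", (fault & VM_FAULT_MAJOR) ? "major" : "minor");
}

int main(void)
{
	classify(VM_FAULT_MAJOR);	/* prints "major fault"   */
	classify(0);			/* prints "minor fault"   */
	classify(VM_FAULT_OOM);		/* prints "out of memory" */
	return 0;
}

Because VM_FAULT_ERROR is just the OR of the error bits, a caller can detect any
failure with a single bitwise test instead of a switch over each return code.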
Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--  include/linux/mm.h | 58
1 file changed, 13 insertions, 45 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ff0b8844bd5a..f8e12b3b6110 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -196,25 +196,10 @@ extern pgprot_t protection_map[16];
 #define FAULT_FLAG_NONLINEAR	0x02	/* Fault was via a nonlinear mapping */
 
 
-#define FAULT_RET_NOPAGE	0x0100	/* ->fault did not return a page. This
-					 * can be used if the handler installs
-					 * their own pte.
-					 */
-#define FAULT_RET_LOCKED	0x0200	/* ->fault locked the page, caller must
-					 * unlock after installing the mapping.
-					 * This is used by pagecache in
-					 * particular, where the page lock is
-					 * used to synchronise against truncate
-					 * and invalidate. Mutually exclusive
-					 * with FAULT_RET_NOPAGE.
-					 */
-
 /*
  * vm_fault is filled by the the pagefault handler and passed to the vma's
- * ->fault function. The vma's ->fault is responsible for returning the
- * VM_FAULT_xxx type which occupies the lowest byte of the return code, ORed
- * with FAULT_RET_ flags that occupy the next byte and give details about
- * how the fault was handled.
+ * ->fault function. The vma's ->fault is responsible for returning a bitmask
+ * of VM_FAULT_xxx flags that give details about how the fault was handled.
  *
  * pgoff should be used in favour of virtual_address, if possible. If pgoff
  * is used, one may set VM_CAN_NONLINEAR in the vma->vm_flags to get nonlinear
@@ -226,9 +211,9 @@ struct vm_fault {
 	void __user *virtual_address;	/* Faulting virtual address */
 
 	struct page *page;		/* ->fault handlers should return a
-					 * page here, unless FAULT_RET_NOPAGE
+					 * page here, unless VM_FAULT_NOPAGE
 					 * is set (which is also implied by
-					 * VM_FAULT_OOM or SIGBUS).
+					 * VM_FAULT_ERROR).
 					 */
 };
 
@@ -712,26 +697,17 @@ static inline int page_mapped(struct page *page)
  * just gets major/minor fault counters bumped up.
  */
 
-/*
- * VM_FAULT_ERROR is set for the error cases, to make some tests simpler.
- */
-#define VM_FAULT_ERROR	0x20
+#define VM_FAULT_MINOR	0 /* For backwards compat. Remove me quickly. */
 
-#define VM_FAULT_OOM	(0x00 | VM_FAULT_ERROR)
-#define VM_FAULT_SIGBUS	(0x01 | VM_FAULT_ERROR)
-#define VM_FAULT_MINOR	0x02
-#define VM_FAULT_MAJOR	0x03
+#define VM_FAULT_OOM	0x0001
+#define VM_FAULT_SIGBUS	0x0002
+#define VM_FAULT_MAJOR	0x0004
+#define VM_FAULT_WRITE	0x0008	/* Special case for get_user_pages */
 
-/*
- * Special case for get_user_pages.
- * Must be in a distinct bit from the above VM_FAULT_ flags.
- */
-#define VM_FAULT_WRITE	0x10
+#define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, not return page */
+#define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */
 
-/*
- * Mask of VM_FAULT_ flags
- */
-#define VM_FAULT_MASK	0xff
+#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS)
 
 #define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)
 
@@ -817,16 +793,8 @@ extern int vmtruncate(struct inode * inode, loff_t offset);
 extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);
 
 #ifdef CONFIG_MMU
-extern int __handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma,
+extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long address, int write_access);
-
-static inline int handle_mm_fault(struct mm_struct *mm,
-		struct vm_area_struct *vma, unsigned long address,
-		int write_access)
-{
-	return __handle_mm_fault(mm, vma, address, write_access) &
-				(~VM_FAULT_WRITE);
-}
 #else
 static inline int handle_mm_fault(struct mm_struct *mm,
 		struct vm_area_struct *vma, unsigned long address,
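
To show the other side of the interface, here is a minimal sketch of what a
driver-style ->fault handler might look like under the new flags. It is
illustrative only and not part of this commit: example_dev, its fields, and the
pfn arithmetic are invented, and the error handling is deliberately simplistic.

#include <linux/mm.h>

/* Hypothetical per-mapping state, for illustration only. */
struct example_dev {
	unsigned long base_pfn;		/* first backing page frame  */
	unsigned long nr_pages;		/* size of the backing store */
};

static int example_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct example_dev *dev = vma->vm_private_data;

	if (vmf->pgoff >= dev->nr_pages)
		return VM_FAULT_SIGBUS;		/* error bit, no page returned */

	/*
	 * Install the pte ourselves and return VM_FAULT_NOPAGE (the renamed
	 * FAULT_RET_NOPAGE) so the core does not expect a struct page in
	 * vmf->page.
	 */
	if (vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
			  dev->base_pfn + vmf->pgoff))
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

A pagecache-backed handler would instead return its page in vmf->page and OR in
VM_FAULT_LOCKED while holding the page lock, as the removed FAULT_RET_LOCKED
comment above described.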