author		Nick Piggin <npiggin@suse.de>	2007-07-19 04:47:03 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-19 13:04:41 -0400
commit		d0217ac04ca6591841e5665f518e38064f4e65bd
tree		d3309094bb734d34773f97d642593e298a5cfcfc /include/linux
parent		ed2f2f9b3ff8debdf512f7687b232c3c1d7d60d7
mm: fault feedback #1
Change ->fault prototype. We now return an int, which contains
VM_FAULT_xxx code in the low byte, and FAULT_RET_xxx code in the next byte.
FAULT_RET_ code tells the VM whether a page was found, whether it has been
locked, and potentially other things. This is not quite the way he wanted
it yet, but that's changed in the next patch (which requires changes to
arch code).
This means we no longer set VM_CAN_INVALIDATE in the vma in order to say
that a page is locked, which requires filemap_nopage to go away (because we
can no longer remain backward compatible without that flag); but we were
going to do that anyway.
struct fault_data is renamed to struct vm_fault as Linus asked. address
is now a void __user * that we should firmly encourage drivers not to use
without really good reason.
The page is now returned via a page pointer in the vm_fault struct.
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
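To illustrate the new contract, here is a rough sketch of a driver-style handler against the new prototype; my_fault, struct my_dev and its fields are made-up names, not part of this patch. The handler looks up its backing page by vmf->pgoff, hands it back through vmf->page with a reference held, and reports the fault type in the low byte of the return value.

static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct my_dev *dev = vma->vm_private_data;	/* hypothetical driver state */
	struct page *page;

	if (vmf->pgoff >= dev->nr_pages)	/* outside the object: error in the low byte */
		return VM_FAULT_SIGBUS;

	page = dev->pages[vmf->pgoff];		/* prefer pgoff over virtual_address */
	get_page(page);				/* the core expects a referenced page */
	vmf->page = page;			/* page handed back via the vm_fault struct */

	return VM_FAULT_MINOR;			/* VM_FAULT_xxx type in the low byte */
}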
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/mm.h  84
1 file changed, 50 insertions(+), 34 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f28a1b3e63a9..ff0b8844bd5a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -168,12 +168,7 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_INSERTPAGE	0x02000000	/* The vma has had "vm_insert_page()" done on it */
 #define VM_ALWAYSDUMP	0x04000000	/* Always include in core dumps */
 
-#define VM_CAN_INVALIDATE 0x08000000	/* The mapping may be invalidated,
-					 * eg. truncate or invalidate_inode_*.
-					 * In this case, do_no_page must
-					 * return with the page locked.
-					 */
-#define VM_CAN_NONLINEAR 0x10000000	/* Has ->fault & does nonlinear pages */
+#define VM_CAN_NONLINEAR 0x08000000	/* Has ->fault & does nonlinear pages */
 
 #ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
@@ -197,24 +192,44 @@ extern unsigned int kobjsize(const void *objp);
  */
 extern pgprot_t protection_map[16];
 
-#define FAULT_FLAG_WRITE	0x01
-#define FAULT_FLAG_NONLINEAR	0x02
+#define FAULT_FLAG_WRITE	0x01	/* Fault was a write access */
+#define FAULT_FLAG_NONLINEAR	0x02	/* Fault was via a nonlinear mapping */
+
+
+#define FAULT_RET_NOPAGE	0x0100	/* ->fault did not return a page. This
+					 * can be used if the handler installs
+					 * their own pte.
+					 */
+#define FAULT_RET_LOCKED	0x0200	/* ->fault locked the page, caller must
+					 * unlock after installing the mapping.
+					 * This is used by pagecache in
+					 * particular, where the page lock is
+					 * used to synchronise against truncate
+					 * and invalidate. Mutually exclusive
+					 * with FAULT_RET_NOPAGE.
+					 */
 
 /*
- * fault_data is filled in the the pagefault handler and passed to the
- * vma's ->fault function. That function is responsible for filling in
- * 'type', which is the type of fault if a page is returned, or the type
- * of error if NULL is returned.
+ * vm_fault is filled by the the pagefault handler and passed to the vma's
+ * ->fault function. The vma's ->fault is responsible for returning the
+ * VM_FAULT_xxx type which occupies the lowest byte of the return code, ORed
+ * with FAULT_RET_ flags that occupy the next byte and give details about
+ * how the fault was handled.
  *
- * pgoff should be used in favour of address, if possible. If pgoff is
- * used, one may set VM_CAN_NONLINEAR in the vma->vm_flags to get
- * nonlinear mapping support.
+ * pgoff should be used in favour of virtual_address, if possible. If pgoff
+ * is used, one may set VM_CAN_NONLINEAR in the vma->vm_flags to get nonlinear
+ * mapping support.
  */
-struct fault_data {
-	unsigned long address;
-	pgoff_t pgoff;
-	unsigned int flags;
-	int type;
+struct vm_fault {
+	unsigned int flags;		/* FAULT_FLAG_xxx flags */
+	pgoff_t pgoff;			/* Logical page offset based on vma */
+	void __user *virtual_address;	/* Faulting virtual address */
+
+	struct page *page;		/* ->fault handlers should return a
+					 * page here, unless FAULT_RET_NOPAGE
+					 * is set (which is also implied by
+					 * VM_FAULT_OOM or SIGBUS).
+					 */
 };
 
 /*
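For handlers that install the pte themselves rather than handing a page back, FAULT_RET_NOPAGE is the signal to the core. A hedged sketch follows (my_pfn_fault and my_dev_pfn are invented for illustration); note this is one of the rare cases where the faulting virtual_address is legitimately needed.

static int my_pfn_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long addr = (unsigned long)vmf->virtual_address;
	unsigned long pfn = my_dev_pfn(vma, vmf->pgoff);	/* hypothetical pfn lookup */

	if (vm_insert_pfn(vma, addr, pfn))	/* handler installs its own pte */
		return VM_FAULT_SIGBUS;

	/* no struct page is returned, so vmf->page is left unset */
	return VM_FAULT_MINOR | FAULT_RET_NOPAGE;
}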
@@ -225,15 +240,11 @@ struct fault_data {
 struct vm_operations_struct {
 	void (*open)(struct vm_area_struct * area);
 	void (*close)(struct vm_area_struct * area);
-	struct page *(*fault)(struct vm_area_struct *vma,
-			struct fault_data *fdata);
+	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
 	struct page *(*nopage)(struct vm_area_struct *area,
 			unsigned long address, int *type);
 	unsigned long (*nopfn)(struct vm_area_struct *area,
 			unsigned long address);
-	int (*populate)(struct vm_area_struct *area, unsigned long address,
-			unsigned long len, pgprot_t prot, unsigned long pgoff,
-			int nonblock);
 
 	/* notification that a previously read-only page is about to become
 	 * writable, if an error is returned it will cause a SIGBUS */
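With ->populate gone, a driver that wants nonlinear mappings simply sets VM_CAN_NONLINEAR and handles everything in ->fault; nonlinear faults arrive with FAULT_FLAG_NONLINEAR set and the file offset already decoded into vmf->pgoff. A sketch of the wiring (my_mmap, my_fault and my_vm_ops are hypothetical):

static struct vm_operations_struct my_vm_ops = {
	.fault	= my_fault,			/* new int-returning hook */
};

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &my_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;	/* ->fault also serves remap_file_pages() */
	return 0;
}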
@@ -700,8 +711,14 @@ static inline int page_mapped(struct page *page)
  * Used to decide whether a process gets delivered SIGBUS or
  * just gets major/minor fault counters bumped up.
  */
-#define VM_FAULT_OOM	0x00
-#define VM_FAULT_SIGBUS	0x01
+
+/*
+ * VM_FAULT_ERROR is set for the error cases, to make some tests simpler.
+ */
+#define VM_FAULT_ERROR	0x20
+
+#define VM_FAULT_OOM	(0x00 | VM_FAULT_ERROR)
+#define VM_FAULT_SIGBUS	(0x01 | VM_FAULT_ERROR)
 #define VM_FAULT_MINOR	0x02
 #define VM_FAULT_MAJOR	0x03
 
@@ -711,6 +728,11 @@ static inline int page_mapped(struct page *page)
  */
 #define VM_FAULT_WRITE	0x10
 
+/*
+ * Mask of VM_FAULT_ flags
+ */
+#define VM_FAULT_MASK	0xff
+
 #define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)
 
 extern void show_free_areas(void);
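On the consuming side, the low byte and the FAULT_RET_ byte can be separated with VM_FAULT_MASK, and the FAULT_RET_ bits tell the VM what state the page is in. A simplified sketch of how a caller can decode the result (this is not a copy of the real fault path in mm/memory.c, which does considerably more):

static int call_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int ret = vma->vm_ops->fault(vma, vmf);

	if (ret & VM_FAULT_ERROR)		/* VM_FAULT_OOM or VM_FAULT_SIGBUS */
		return ret & VM_FAULT_MASK;	/* strip the FAULT_RET_ byte */

	if (!(ret & FAULT_RET_NOPAGE) && !(ret & FAULT_RET_LOCKED))
		lock_page(vmf->page);		/* pagecache handlers return it already locked */

	/* ... install the pte, then unlock the page if FAULT_RET_LOCKED was set ... */
	return ret & VM_FAULT_MASK;
}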
@@ -793,8 +815,6 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
 
 extern int vmtruncate(struct inode * inode, loff_t offset);
 extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);
-extern int install_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, struct page *page, pgprot_t prot);
-extern int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long pgoff, pgprot_t prot);
 
 #ifdef CONFIG_MMU
 extern int __handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma,
@@ -1135,11 +1155,7 @@ extern void truncate_inode_pages_range(struct address_space *,
 					loff_t lstart, loff_t lend);
 
 /* generic vm_area_ops exported for stackable file systems */
-extern struct page *filemap_fault(struct vm_area_struct *, struct fault_data *);
-extern struct page * __deprecated_for_modules
-		filemap_nopage(struct vm_area_struct *, unsigned long, int *);
-extern int __deprecated_for_modules filemap_populate(struct vm_area_struct *,
-		unsigned long, unsigned long, pgprot_t, unsigned long, int);
+extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
 
 /* mm/page-writeback.c */
 int write_one_page(struct page *page, int wait);
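Filesystems backed by the page cache typically don't write their own handler at all; they reuse the exported filemap_fault, much as the generic file mmap path does. A sketch for a hypothetical filesystem myfs:

static struct vm_operations_struct myfs_file_vm_ops = {
	.fault	= filemap_fault,		/* generic pagecache ->fault */
};

static int myfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &myfs_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;	/* filemap_fault handles nonlinear faults via pgoff */
	return 0;
}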