aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/mm.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--include/linux/mm.h103
1 file changed, 67 insertions, 36 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a5c451816fdc..c456c3a1c28e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -168,6 +168,8 @@ extern unsigned int kobjsize(const void *objp);
168#define VM_INSERTPAGE 0x02000000 /* The vma has had "vm_insert_page()" done on it */ 168#define VM_INSERTPAGE 0x02000000 /* The vma has had "vm_insert_page()" done on it */
169#define VM_ALWAYSDUMP 0x04000000 /* Always include in core dumps */ 169#define VM_ALWAYSDUMP 0x04000000 /* Always include in core dumps */
170 170
171#define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
172
171#ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */ 173#ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
172#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS 174#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
173#endif 175#endif
@@ -190,6 +192,30 @@ extern unsigned int kobjsize(const void *objp);
190 */ 192 */
191extern pgprot_t protection_map[16]; 193extern pgprot_t protection_map[16];
192 194
195#define FAULT_FLAG_WRITE 0x01 /* Fault was a write access */
196#define FAULT_FLAG_NONLINEAR 0x02 /* Fault was via a nonlinear mapping */
197
198
199/*
200 * vm_fault is filled by the pagefault handler and passed to the vma's
201 * ->fault function. The vma's ->fault is responsible for returning a bitmask
202 * of VM_FAULT_xxx flags that give details about how the fault was handled.
203 *
204 * pgoff should be used in favour of virtual_address, if possible. If pgoff
205 * is used, one may set VM_CAN_NONLINEAR in the vma->vm_flags to get nonlinear
206 * mapping support.
207 */
208struct vm_fault {
209 unsigned int flags; /* FAULT_FLAG_xxx flags */
210 pgoff_t pgoff; /* Logical page offset based on vma */
211 void __user *virtual_address; /* Faulting virtual address */
212
213 struct page *page; /* ->fault handlers should return a
214 * page here, unless VM_FAULT_NOPAGE
215 * is set (which is also implied by
216 * VM_FAULT_ERROR).
217 */
218};
193 219
194/* 220/*
195 * These are the virtual MM functions - opening of an area, closing and 221 * These are the virtual MM functions - opening of an area, closing and
@@ -199,9 +225,11 @@ extern pgprot_t protection_map[16];
199struct vm_operations_struct { 225struct vm_operations_struct {
200 void (*open)(struct vm_area_struct * area); 226 void (*open)(struct vm_area_struct * area);
201 void (*close)(struct vm_area_struct * area); 227 void (*close)(struct vm_area_struct * area);
202 struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int *type); 228 int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
203 unsigned long (*nopfn)(struct vm_area_struct * area, unsigned long address); 229 struct page *(*nopage)(struct vm_area_struct *area,
204 int (*populate)(struct vm_area_struct * area, unsigned long address, unsigned long len, pgprot_t prot, unsigned long pgoff, int nonblock); 230 unsigned long address, int *type);
231 unsigned long (*nopfn)(struct vm_area_struct *area,
232 unsigned long address);
205 233
206 /* notification that a previously read-only page is about to become 234 /* notification that a previously read-only page is about to become
207 * writable, if an error is returned it will cause a SIGBUS */ 235 * writable, if an error is returned it will cause a SIGBUS */
@@ -655,7 +683,6 @@ static inline int page_mapped(struct page *page)
655 */ 683 */
656#define NOPAGE_SIGBUS (NULL) 684#define NOPAGE_SIGBUS (NULL)
657#define NOPAGE_OOM ((struct page *) (-1)) 685#define NOPAGE_OOM ((struct page *) (-1))
658#define NOPAGE_REFAULT ((struct page *) (-2)) /* Return to userspace, rerun */
659 686
660/* 687/*
661 * Error return values for the *_nopfn functions 688 * Error return values for the *_nopfn functions
@@ -669,16 +696,18 @@ static inline int page_mapped(struct page *page)
669 * Used to decide whether a process gets delivered SIGBUS or 696 * Used to decide whether a process gets delivered SIGBUS or
670 * just gets major/minor fault counters bumped up. 697 * just gets major/minor fault counters bumped up.
671 */ 698 */
672#define VM_FAULT_OOM 0x00 699
673#define VM_FAULT_SIGBUS 0x01 700#define VM_FAULT_MINOR 0 /* For backwards compat. Remove me quickly. */
674#define VM_FAULT_MINOR 0x02 701
675#define VM_FAULT_MAJOR 0x03 702#define VM_FAULT_OOM 0x0001
676 703#define VM_FAULT_SIGBUS 0x0002
677/* 704#define VM_FAULT_MAJOR 0x0004
678 * Special case for get_user_pages. 705#define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */
679 * Must be in a distinct bit from the above VM_FAULT_ flags. 706
680 */ 707#define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */
681#define VM_FAULT_WRITE 0x10 708#define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */
709
710#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS)
682 711
683#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK) 712#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
684 713
@@ -762,20 +791,10 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
762 791
763extern int vmtruncate(struct inode * inode, loff_t offset); 792extern int vmtruncate(struct inode * inode, loff_t offset);
764extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end); 793extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);
765extern int install_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, struct page *page, pgprot_t prot);
766extern int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long pgoff, pgprot_t prot);
767 794
768#ifdef CONFIG_MMU 795#ifdef CONFIG_MMU
769extern int __handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma, 796extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
770 unsigned long address, int write_access); 797 unsigned long address, int write_access);
771
772static inline int handle_mm_fault(struct mm_struct *mm,
773 struct vm_area_struct *vma, unsigned long address,
774 int write_access)
775{
776 return __handle_mm_fault(mm, vma, address, write_access) &
777 (~VM_FAULT_WRITE);
778}
779#else 798#else
780static inline int handle_mm_fault(struct mm_struct *mm, 799static inline int handle_mm_fault(struct mm_struct *mm,
781 struct vm_area_struct *vma, unsigned long address, 800 struct vm_area_struct *vma, unsigned long address,
@@ -789,7 +808,6 @@ static inline int handle_mm_fault(struct mm_struct *mm,
789 808
790extern int make_pages_present(unsigned long addr, unsigned long end); 809extern int make_pages_present(unsigned long addr, unsigned long end);
791extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write); 810extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
792void install_arg_page(struct vm_area_struct *, struct page *, unsigned long);
793 811
794int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, 812int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
795 int len, int write, int force, struct page **pages, struct vm_area_struct **vmas); 813 int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);
@@ -806,9 +824,15 @@ int FASTCALL(set_page_dirty(struct page *page));
806int set_page_dirty_lock(struct page *page); 824int set_page_dirty_lock(struct page *page);
807int clear_page_dirty_for_io(struct page *page); 825int clear_page_dirty_for_io(struct page *page);
808 826
827extern unsigned long move_page_tables(struct vm_area_struct *vma,
828 unsigned long old_addr, struct vm_area_struct *new_vma,
829 unsigned long new_addr, unsigned long len);
809extern unsigned long do_mremap(unsigned long addr, 830extern unsigned long do_mremap(unsigned long addr,
810 unsigned long old_len, unsigned long new_len, 831 unsigned long old_len, unsigned long new_len,
811 unsigned long flags, unsigned long new_addr); 832 unsigned long flags, unsigned long new_addr);
833extern int mprotect_fixup(struct vm_area_struct *vma,
834 struct vm_area_struct **pprev, unsigned long start,
835 unsigned long end, unsigned long newflags);
812 836
813/* 837/*
814 * A callback you can register to apply pressure to ageable caches. 838 * A callback you can register to apply pressure to ageable caches.
@@ -1104,9 +1128,7 @@ extern void truncate_inode_pages_range(struct address_space *,
1104 loff_t lstart, loff_t lend); 1128 loff_t lstart, loff_t lend);
1105 1129
1106/* generic vm_area_ops exported for stackable file systems */ 1130/* generic vm_area_ops exported for stackable file systems */
1107extern struct page *filemap_nopage(struct vm_area_struct *, unsigned long, int *); 1131extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
1108extern int filemap_populate(struct vm_area_struct *, unsigned long,
1109 unsigned long, pgprot_t, unsigned long, int);
1110 1132
1111/* mm/page-writeback.c */ 1133/* mm/page-writeback.c */
1112int write_one_page(struct page *page, int wait); 1134int write_one_page(struct page *page, int wait);
@@ -1121,13 +1143,20 @@ int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
1121 pgoff_t offset, unsigned long nr_to_read); 1143 pgoff_t offset, unsigned long nr_to_read);
1122int force_page_cache_readahead(struct address_space *mapping, struct file *filp, 1144int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
1123 pgoff_t offset, unsigned long nr_to_read); 1145 pgoff_t offset, unsigned long nr_to_read);
1124unsigned long page_cache_readahead(struct address_space *mapping, 1146
1125 struct file_ra_state *ra, 1147void page_cache_sync_readahead(struct address_space *mapping,
1126 struct file *filp, 1148 struct file_ra_state *ra,
1127 pgoff_t offset, 1149 struct file *filp,
1128 unsigned long size); 1150 pgoff_t offset,
1129void handle_ra_miss(struct address_space *mapping, 1151 unsigned long size);
1130 struct file_ra_state *ra, pgoff_t offset); 1152
1153void page_cache_async_readahead(struct address_space *mapping,
1154 struct file_ra_state *ra,
1155 struct file *filp,
1156 struct page *pg,
1157 pgoff_t offset,
1158 unsigned long size);
1159
1131unsigned long max_sane_readahead(unsigned long nr); 1160unsigned long max_sane_readahead(unsigned long nr);
1132 1161
1133/* Do stack extension */ 1162/* Do stack extension */
@@ -1135,6 +1164,8 @@ extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
1135#ifdef CONFIG_IA64 1164#ifdef CONFIG_IA64
1136extern int expand_upwards(struct vm_area_struct *vma, unsigned long address); 1165extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
1137#endif 1166#endif
1167extern int expand_stack_downwards(struct vm_area_struct *vma,
1168 unsigned long address);
1138 1169
1139/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ 1170/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
1140extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr); 1171extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);