about summary refs log tree commit diff stats
path: root/include/linux/mm.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--  include/linux/mm.h  34
1 file changed, 26 insertions(+), 8 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 646bc36b4d1b..8bd74558c0e4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -309,10 +309,27 @@ struct vm_fault {
309 * VM_FAULT_DAX_LOCKED and fill in 309 * VM_FAULT_DAX_LOCKED and fill in
310 * entry here. 310 * entry here.
311 */ 311 */
312 /* for ->map_pages() only */ 312};
313 pgoff_t max_pgoff; /* map pages for offset from pgoff till 313
314 * max_pgoff inclusive */ 314/*
315 pte_t *pte; /* pte entry associated with ->pgoff */ 315 * Page fault context: passes though page fault handler instead of endless list
316 * of function arguments.
317 */
318struct fault_env {
319 struct vm_area_struct *vma; /* Target VMA */
320 unsigned long address; /* Faulting virtual address */
321 unsigned int flags; /* FAULT_FLAG_xxx flags */
322 pmd_t *pmd; /* Pointer to pmd entry matching
323 * the 'address'
324 */
325 pte_t *pte; /* Pointer to pte entry matching
326 * the 'address'. NULL if the page
327 * table hasn't been allocated.
328 */
329 spinlock_t *ptl; /* Page table lock.
330 * Protects pte page table if 'pte'
331 * is not NULL, otherwise pmd.
332 */
316}; 333};
317 334
318/* 335/*
@@ -327,7 +344,8 @@ struct vm_operations_struct {
327 int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); 344 int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
328 int (*pmd_fault)(struct vm_area_struct *, unsigned long address, 345 int (*pmd_fault)(struct vm_area_struct *, unsigned long address,
329 pmd_t *, unsigned int flags); 346 pmd_t *, unsigned int flags);
330 void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf); 347 void (*map_pages)(struct fault_env *fe,
348 pgoff_t start_pgoff, pgoff_t end_pgoff);
331 349
332 /* notification that a previously read-only page is about to become 350 /* notification that a previously read-only page is about to become
333 * writable, if an error is returned it will cause a SIGBUS */ 351 * writable, if an error is returned it will cause a SIGBUS */
@@ -600,8 +618,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
600 return pte; 618 return pte;
601} 619}
602 620
603void do_set_pte(struct vm_area_struct *vma, unsigned long address, 621void do_set_pte(struct fault_env *fe, struct page *page);
604 struct page *page, pte_t *pte, bool write, bool anon);
605#endif 622#endif
606 623
607/* 624/*
@@ -2062,7 +2079,8 @@ extern void truncate_inode_pages_final(struct address_space *);
2062 2079
2063/* generic vm_area_ops exported for stackable file systems */ 2080/* generic vm_area_ops exported for stackable file systems */
2064extern int filemap_fault(struct vm_area_struct *, struct vm_fault *); 2081extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
2065extern void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf); 2082extern void filemap_map_pages(struct fault_env *fe,
2083 pgoff_t start_pgoff, pgoff_t end_pgoff);
2066extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); 2084extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
2067 2085
2068/* mm/page-writeback.c */ 2086/* mm/page-writeback.c */