aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/mm.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--include/linux/mm.h38
1 file changed, 4 insertions, 34 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c456c3a1c28..655094dc944 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -10,7 +10,6 @@
10#include <linux/mmzone.h> 10#include <linux/mmzone.h>
11#include <linux/rbtree.h> 11#include <linux/rbtree.h>
12#include <linux/prio_tree.h> 12#include <linux/prio_tree.h>
13#include <linux/fs.h>
14#include <linux/mutex.h> 13#include <linux/mutex.h>
15#include <linux/debug_locks.h> 14#include <linux/debug_locks.h>
16#include <linux/backing-dev.h> 15#include <linux/backing-dev.h>
@@ -18,7 +17,9 @@
18 17
19struct mempolicy; 18struct mempolicy;
20struct anon_vma; 19struct anon_vma;
20struct file_ra_state;
21struct user_struct; 21struct user_struct;
22struct writeback_control;
22 23
23#ifndef CONFIG_DISCONTIGMEM /* Don't use mapnrs, do it properly */ 24#ifndef CONFIG_DISCONTIGMEM /* Don't use mapnrs, do it properly */
24extern unsigned long max_mapnr; 25extern unsigned long max_mapnr;
@@ -861,38 +862,7 @@ struct shrinker {
861extern void register_shrinker(struct shrinker *); 862extern void register_shrinker(struct shrinker *);
862extern void unregister_shrinker(struct shrinker *); 863extern void unregister_shrinker(struct shrinker *);
863 864
864/* 865int vma_wants_writenotify(struct vm_area_struct *vma);
865 * Some shared mappigns will want the pages marked read-only
866 * to track write events. If so, we'll downgrade vm_page_prot
867 * to the private version (using protection_map[] without the
868 * VM_SHARED bit).
869 */
870static inline int vma_wants_writenotify(struct vm_area_struct *vma)
871{
872 unsigned int vm_flags = vma->vm_flags;
873
874 /* If it was private or non-writable, the write bit is already clear */
875 if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
876 return 0;
877
878 /* The backer wishes to know when pages are first written to? */
879 if (vma->vm_ops && vma->vm_ops->page_mkwrite)
880 return 1;
881
882 /* The open routine did something to the protections already? */
883 if (pgprot_val(vma->vm_page_prot) !=
884 pgprot_val(protection_map[vm_flags &
885 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]))
886 return 0;
887
888 /* Specialty mapping? */
889 if (vm_flags & (VM_PFNMAP|VM_INSERTPAGE))
890 return 0;
891
892 /* Can the mapping track the dirty pages? */
893 return vma->vm_file && vma->vm_file->f_mapping &&
894 mapping_cap_account_dirty(vma->vm_file->f_mapping);
895}
896 866
897extern pte_t *FASTCALL(get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl)); 867extern pte_t *FASTCALL(get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl));
898 868
@@ -1246,7 +1216,7 @@ void drop_slab(void);
1246extern int randomize_va_space; 1216extern int randomize_va_space;
1247#endif 1217#endif
1248 1218
1249__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma); 1219const char * arch_vma_name(struct vm_area_struct *vma);
1250 1220
1251#endif /* __KERNEL__ */ 1221#endif /* __KERNEL__ */
1252#endif /* _LINUX_MM_H */ 1222#endif /* _LINUX_MM_H */