Diffstat (limited to 'include/linux/mm.h')
 include/linux/mm.h | 56 +++++++++++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 47 insertions(+), 9 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 581703d86fbd..6507dde38b16 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -137,7 +137,8 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_RandomReadHint(v)		((v)->vm_flags & VM_RAND_READ)
 
 /*
- * special vmas that are non-mergable, non-mlock()able
+ * Special vmas that are non-mergable, non-mlock()able.
+ * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
  */
 #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)
 
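For context, VM_SPECIAL is the mask that vma-merging code consults so that such mappings are left alone; the new comment line records that the THP code now depends on it too. A minimal sketch of that kind of guard, with a hypothetical helper name (the real check sits in mm/mmap.c's vma_merge()):

	#include <linux/mm.h>

	/* Hypothetical guard: special mappings must never be merged. */
	static int vma_is_mergeable(struct vm_area_struct *vma)
	{
		return !(vma->vm_flags & VM_SPECIAL);
	}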
@@ -151,6 +152,7 @@ extern pgprot_t protection_map[16];
 #define FAULT_FLAG_NONLINEAR	0x02	/* Fault was via a nonlinear mapping */
 #define FAULT_FLAG_MKWRITE	0x04	/* Fault was mkwrite of existing pte */
 #define FAULT_FLAG_ALLOW_RETRY	0x08	/* Retry fault if blocking */
+#define FAULT_FLAG_RETRY_NOWAIT	0x10	/* Don't drop mmap_sem and wait when retrying */
 
 /*
  * This interface is used by x86 PAT code to identify a pfn mapping that is
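FAULT_FLAG_RETRY_NOWAIT only has meaning when FAULT_FLAG_ALLOW_RETRY is also set. A hedged sketch of a caller (the wrapper is hypothetical; handle_mm_fault() and VM_FAULT_RETRY are the real interface): the fault path may start I/O for the missing page, but it returns VM_FAULT_RETRY instead of sleeping, and mmap_sem stays held:

	#include <linux/errno.h>
	#include <linux/mm.h>

	/* Hypothetical caller: kick off any needed I/O, never sleep on it. */
	static int fault_in_page_nowait(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long addr)
	{
		int ret = handle_mm_fault(mm, vma, addr,
					  FAULT_FLAG_ALLOW_RETRY |
					  FAULT_FLAG_RETRY_NOWAIT);

		if (ret & VM_FAULT_RETRY)
			return -EAGAIN;	/* I/O started; mmap_sem still held */
		return (ret & VM_FAULT_ERROR) ? -EFAULT : 0;
	}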
@@ -607,7 +609,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 #endif
 
 /*
- * Define the bit shifts to access each section.  For non-existant
+ * Define the bit shifts to access each section.  For non-existent
  * sections we define the shift as 0; that plus a 0 mask ensures
  * the compiler will optimise away reference to them.
  */
@@ -859,7 +861,14 @@ extern void pagefault_out_of_memory(void);
 
 #define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)
 
+/*
+ * Flags passed to show_mem() and __show_free_areas() to suppress output in
+ * various contexts.
+ */
+#define SHOW_MEM_FILTER_NODES	(0x0001u)	/* filter disallowed nodes */
+
 extern void show_free_areas(void);
+extern void __show_free_areas(unsigned int flags);
 
 int shmem_lock(struct file *file, int lock, struct user_struct *user);
 struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags);
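A hedged usage sketch for the new filter (the wrapper is hypothetical): a caller reporting an allocation failure can pass SHOW_MEM_FILTER_NODES so that nodes the current task is not allowed to allocate from are omitted from the dump, while plain show_free_areas() stays unfiltered:

	#include <linux/mm.h>

	/* Hypothetical: dump only the free areas relevant to this context. */
	static void report_alloc_failure(void)
	{
		__show_free_areas(SHOW_MEM_FILTER_NODES);
	}

show_mem() further down gains the same flags argument.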
@@ -906,6 +915,9 @@ unsigned long unmap_vmas(struct mmu_gather **tlb,
  * @pgd_entry: if set, called for each non-empty PGD (top-level) entry
  * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
  * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
+ *	       this handler is required to be able to handle
+ *	       pmd_trans_huge() pmds.  They may simply choose to
+ *	       split_huge_page() instead of handling it explicitly.
  * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
  * @pte_hole: if set, called for each hole at all levels
  * @hugetlb_entry: if set, called for each hugetlb entry
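A hedged sketch of a pmd_entry callback that meets the new requirement by splitting transparent huge pmds rather than handling them natively (the callback name is hypothetical; pmd_trans_huge() and split_huge_page_pmd() are the interfaces the comment refers to):

	#include <linux/huge_mm.h>
	#include <linux/mm.h>

	/* Hypothetical walk callback: split huge pmds, then fall through
	 * to the normal pte-level handling. */
	static int my_pmd_entry(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
	{
		if (pmd_trans_huge(*pmd))
			split_huge_page_pmd(walk->mm, pmd);
		/* pmd now refers to ordinary pte pages, or is none */
		return 0;
	}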
@@ -971,6 +983,8 @@ static inline int handle_mm_fault(struct mm_struct *mm,
 
 extern int make_pages_present(unsigned long addr, unsigned long end);
 extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
+extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+		void *buf, int len, int write);
 
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		     unsigned long start, int len, unsigned int foll_flags,
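access_remote_vm() is the mm-based counterpart of access_process_vm(), for callers (e.g. /proc code) that hold a reference on an mm but not necessarily on a live task. A hedged sketch with a hypothetical wrapper:

	#include <linux/errno.h>
	#include <linux/mm.h>

	/* Hypothetical: read len bytes at addr out of another mm. */
	static int peek_remote(struct mm_struct *mm, unsigned long addr,
			       void *buf, int len)
	{
		return access_remote_vm(mm, addr, buf, len, 0) == len ?
			0 : -EFAULT;
	}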
@@ -997,11 +1011,33 @@ int set_page_dirty_lock(struct page *page);
 int clear_page_dirty_for_io(struct page *page);
 
 /* Is the vma a continuation of the stack vma above it? */
-static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
+static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
 {
 	return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
 }
 
+static inline int stack_guard_page_start(struct vm_area_struct *vma,
+					 unsigned long addr)
+{
+	return (vma->vm_flags & VM_GROWSDOWN) &&
+		(vma->vm_start == addr) &&
+		!vma_growsdown(vma->vm_prev, addr);
+}
+
+/* Is the vma a continuation of the stack vma below it? */
+static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
+{
+	return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
+}
+
+static inline int stack_guard_page_end(struct vm_area_struct *vma,
+				       unsigned long addr)
+{
+	return (vma->vm_flags & VM_GROWSUP) &&
+		(vma->vm_end == addr) &&
+		!vma_growsup(vma->vm_next, addr);
+}
+
 extern unsigned long move_page_tables(struct vm_area_struct *vma,
 		unsigned long old_addr, struct vm_area_struct *new_vma,
 		unsigned long new_addr, unsigned long len);
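Taken together, stack_guard_page_start() and stack_guard_page_end() identify the single guard page at the growing edge of a stack vma, unless the neighbouring vma is itself part of the same stack. A hedged sketch of the caller-side test, assuming both directions are checked around one page (mm/memory.c performs a check of this shape before pinning pages):

	#include <linux/mm.h>

	/* Hypothetical: is addr the guard page of a growing stack? */
	static int is_stack_guard_page(struct vm_area_struct *vma,
				       unsigned long addr)
	{
		return stack_guard_page_start(vma, addr) ||
		       stack_guard_page_end(vma, addr + PAGE_SIZE);
	}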
@@ -1347,7 +1383,7 @@ extern void setup_per_zone_wmarks(void);
 extern void calculate_zone_inactive_ratio(struct zone *zone);
 extern void mem_init(void);
 extern void __init mmap_init(void);
-extern void show_mem(void);
+extern void show_mem(unsigned int flags);
 extern void si_meminfo(struct sysinfo * val);
 extern void si_meminfo_node(struct sysinfo *val, int nid);
 extern int after_bootmem;
@@ -1537,6 +1573,8 @@ struct page *follow_page(struct vm_area_struct *, unsigned long address,
 #define FOLL_GET	0x04	/* do get_page on page */
 #define FOLL_DUMP	0x08	/* give error on hole if it would be zero */
 #define FOLL_FORCE	0x10	/* get_user_pages read/write w/o permission */
+#define FOLL_NOWAIT	0x20	/* if a disk transfer is needed, start the IO
+				 * and return without waiting upon it */
 #define FOLL_MLOCK	0x40	/* mark page as mlocked */
 #define FOLL_SPLIT	0x80	/* don't return transhuge pages, split them */
 #define FOLL_HWPOISON	0x100	/* check page is hwpoisoned */
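FOLL_NOWAIT is the get_user_pages-level counterpart of the fault flags added earlier in this patch. A hedged sketch of the translation (the helper is hypothetical; the real mapping happens inside the gup fault path in mm/memory.c):

	#include <linux/mm.h>

	/* Hypothetical: map gup flags onto fault flags for one fault. */
	static unsigned int gup_to_fault_flags(unsigned int foll_flags)
	{
		unsigned int fault_flags = 0;

		if (foll_flags & FOLL_NOWAIT)
			fault_flags |= FAULT_FLAG_ALLOW_RETRY |
				       FAULT_FLAG_RETRY_NOWAIT;
		return fault_flags;
	}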
@@ -1578,13 +1616,13 @@ static inline bool kernel_page_present(struct page *page) { return true; }
 #endif /* CONFIG_HIBERNATION */
 #endif
 
-extern struct vm_area_struct *get_gate_vma(struct task_struct *tsk);
+extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
 #ifdef __HAVE_ARCH_GATE_AREA
-int in_gate_area_no_task(unsigned long addr);
-int in_gate_area(struct task_struct *task, unsigned long addr);
+int in_gate_area_no_mm(unsigned long addr);
+int in_gate_area(struct mm_struct *mm, unsigned long addr);
 #else
-int in_gate_area_no_task(unsigned long addr);
-#define in_gate_area(task, addr) ({(void)task; in_gate_area_no_task(addr);})
+int in_gate_area_no_mm(unsigned long addr);
+#define in_gate_area(mm, addr) ({(void)mm; in_gate_area_no_mm(addr);})
 #endif /* __HAVE_ARCH_GATE_AREA */
 
 int drop_caches_sysctl_handler(struct ctl_table *, int,
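With the gate-area helpers keyed on an mm instead of a task, callers such as core dumpers or /proc readers that hold only an mm reference can test gate addresses directly. A hedged sketch (the wrapper is hypothetical):

	#include <linux/mm.h>

	/* Hypothetical: does addr fall in the gate area of this mm? */
	static int addr_in_gate_area(struct mm_struct *mm, unsigned long addr)
	{
		return mm ? in_gate_area(mm, addr) : in_gate_area_no_mm(addr);
	}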