Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--	include/linux/mm.h	35
1 file changed, 23 insertions(+), 12 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d87a5a5fe87d..c61ba10768ea 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -7,6 +7,7 @@
 
 #include <linux/gfp.h>
 #include <linux/list.h>
+#include <linux/mmdebug.h>
 #include <linux/mmzone.h>
 #include <linux/rbtree.h>
 #include <linux/prio_tree.h>
@@ -73,7 +74,7 @@ extern unsigned int kobjsize(const void *objp);
 #endif
 
 /*
- * vm_flags..
+ * vm_flags in vm_area_struct, see mm_types.h.
  */
 #define VM_READ		0x00000001	/* currently active flags */
 #define VM_WRITE	0x00000002
@@ -219,12 +220,6 @@ struct inode;
  */
 #include <linux/page-flags.h>
 
-#ifdef CONFIG_DEBUG_VM
-#define VM_BUG_ON(cond) BUG_ON(cond)
-#else
-#define VM_BUG_ON(condition) do { } while(0)
-#endif
-
 /*
  * Methods to modify the page usage count.
  *
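Note that VM_BUG_ON() is not simply dropped here: together with the new #include <linux/mmdebug.h> in the first hunk, the definition presumably moves into <linux/mmdebug.h>, so existing users of the macro keep building unchanged.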
@@ -744,6 +739,8 @@ struct zap_details {
 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 		pte_t pte);
 
+int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
+		unsigned long size);
 unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size, struct zap_details *);
 unsigned long unmap_vmas(struct mmu_gather **tlb,
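For context, a hypothetical driver-side sketch of the new zap_vma_ptes() interface (the helper name and scenario are illustrative, not part of this patch): a driver that mapped a buffer into a VMA can invalidate the user mapping so that later accesses fault and can be repopulated.

static void example_invalidate_user_window(struct vm_area_struct *vma)
{
	/*
	 * Illustrative only: tear down every PTE backing this VMA so the
	 * next user access faults; the caller holds the mm's mmap_sem.
	 */
	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}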
@@ -810,7 +807,6 @@ extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *
 
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
 		int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);
-void print_bad_pte(struct vm_area_struct *, pte_t, unsigned long);
 
 extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
 extern void do_invalidatepage(struct page *page, unsigned long offset);
@@ -834,6 +830,19 @@ extern int mprotect_fixup(struct vm_area_struct *vma,
 		unsigned long end, unsigned long newflags);
 
 /*
+ * get_user_pages_fast provides equivalent functionality to get_user_pages,
+ * operating on current and current->mm (force=0 and doesn't return any vmas).
+ *
+ * get_user_pages_fast may take mmap_sem and page tables, so no assumptions
+ * can be made about locking. get_user_pages_fast is to be implemented in a
+ * way that is advantageous (vs get_user_pages()) when the user memory area is
+ * already faulted in and present in ptes. However if the pages have to be
+ * faulted in, it may turn out to be slightly slower).
+ */
+int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+			struct page **pages);
+
+/*
  * A callback you can register to apply pressure to ageable caches.
  *
  * 'shrink' is passed a count 'nr_to_scan' and a 'gfpmask'.  It should
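As a rough usage sketch of the new interface (hypothetical caller, not from this patch; error handling abbreviated): pin a small user buffer with get_user_pages_fast(), use it, then drop the page references.

static int example_pin_user_buffer(unsigned long uaddr, struct page **pages,
				   int nr_pages, int write)
{
	int got;

	/* Works on current->mm; the caller does not hold mmap_sem. */
	got = get_user_pages_fast(uaddr, nr_pages, write, pages);
	if (got < 0)
		return got;

	/* ... perform I/O on the pinned pages (got may be < nr_pages) ... */

	while (got--)
		put_page(pages[got]);
	return 0;
}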
@@ -905,7 +914,7 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
 }
 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
 
-#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+#if USE_SPLIT_PTLOCKS
 /*
  * We tuck a spinlock to guard each pagetable page into its struct page,
  * at page->private, with BUILD_BUG_ON to make sure that this will not
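USE_SPLIT_PTLOCKS is presumably a new helper macro (expected to live in <linux/mm_types.h>) defined as (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS), so this hunk and the one below only rename the condition; the split-ptlock behaviour itself is unchanged.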
@@ -918,14 +927,14 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
 } while (0)
 #define pte_lock_deinit(page)	((page)->mapping = NULL)
 #define pte_lockptr(mm, pmd)	({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
-#else
+#else	/* !USE_SPLIT_PTLOCKS */
 /*
  * We use mm->page_table_lock to guard all pagetable pages of the mm.
  */
 #define pte_lock_init(page)	do {} while (0)
 #define pte_lock_deinit(page)	do {} while (0)
 #define pte_lockptr(mm, pmd)	({(void)(pmd); &(mm)->page_table_lock;})
-#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+#endif /* USE_SPLIT_PTLOCKS */
 
 static inline void pgtable_page_ctor(struct page *page)
 {
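To illustrate what pte_lockptr() selects in either configuration, a minimal sketch of the usual lock/map pattern (the pte_offset_map_lock() helper in this header follows the same sequence; the function name here is hypothetical):

static void example_touch_pte(struct mm_struct *mm, pmd_t *pmd,
			      unsigned long address)
{
	spinlock_t *ptl = pte_lockptr(mm, pmd);	/* per-page or per-mm lock */
	pte_t *pte = pte_offset_map(pmd, address);

	spin_lock(ptl);
	/* ... inspect or modify *pte while holding the correct lock ... */
	spin_unlock(ptl);
	pte_unmap(pte);
}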
@@ -1009,7 +1018,6 @@ extern unsigned long absent_pages_in_range(unsigned long start_pfn,
 extern void get_pfn_range_for_nid(unsigned int nid,
 			unsigned long *start_pfn, unsigned long *end_pfn);
 extern unsigned long find_min_pfn_with_active_regions(void);
-extern unsigned long find_max_pfn_with_active_regions(void);
 extern void free_bootmem_with_active_regions(int nid,
 						unsigned long max_low_pfn);
 typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
@@ -1072,6 +1080,9 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
 	unsigned long addr, unsigned long len, pgoff_t pgoff);
 extern void exit_mmap(struct mm_struct *);
 
+extern int mm_take_all_locks(struct mm_struct *mm);
+extern void mm_drop_all_locks(struct mm_struct *mm);
+
 #ifdef CONFIG_PROC_FS
 /* From fs/proc/base.c. callers must _not_ hold the mm's exe_file_lock */
 extern void added_exe_file_vma(struct mm_struct *mm);
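A minimal sketch of how the new mm_take_all_locks()/mm_drop_all_locks() pair is meant to be used, assuming the conventional 0-on-success return and that mmap_sem must be held for write; the surrounding function is hypothetical:

static int example_with_mm_frozen(struct mm_struct *mm)
{
	int ret;

	down_write(&mm->mmap_sem);
	ret = mm_take_all_locks(mm);
	if (!ret) {
		/* No VMA in this mm can change while the locks are held. */
		mm_drop_all_locks(mm);
	}
	up_write(&mm->mmap_sem);
	return ret;
}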