author		Ingo Molnar <mingo@elte.hu>	2008-08-15 12:15:17 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-08-15 12:15:17 -0400
commit		f3efbe582b5396d134024c03a5fa253f2a85d9a6
tree		e4e15b7567b82d24cb1e7327398286a2b88df04c	/include/linux/mm.h
parent		05d3ed0a1fe3ea05ab9f3b8d32576a0bc2e19660
parent		b635acec48bcaa9183fcbf4e3955616b0d4119b5
Merge branch 'linus' into x86/gart
Diffstat (limited to 'include/linux/mm.h')

-rw-r--r--	include/linux/mm.h	50
1 file changed, 33 insertions(+), 17 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2128ef7780c6..fa651609b65d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -41,6 +41,9 @@ extern unsigned long mmap_min_addr;
 
 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
 
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
+
 /*
  * Linux kernel virtual memory manager primitives.
  * The idea being to have a "virtual" mm in the same way
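The new PAGE_ALIGN() rounds an address up to the next page boundary using the kernel's generic power-of-two ALIGN() helper. A minimal userspace sketch of the same arithmetic, assuming a 4096-byte page (PAGE_SIZE and ALIGN are re-created here for illustration; in the kernel they come from the mm headers):

#include <stdio.h>

#define PAGE_SIZE 4096UL
/* round x up to the next multiple of a; a must be a power of two */
#define ALIGN(x, a)      (((x) + ((a) - 1)) & ~((a) - 1))
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

int main(void)
{
        unsigned long addr = 0x1234;

        /* 0x1234 rounds up to the next 4 KiB boundary, 0x2000 */
        printf("PAGE_ALIGN(0x%lx) = 0x%lx\n", addr, PAGE_ALIGN(addr));
        return 0;
}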
@@ -100,6 +103,7 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_DONTEXPAND   0x00040000      /* Cannot expand with mremap() */
 #define VM_RESERVED     0x00080000      /* Count as reserved_vm like IO */
 #define VM_ACCOUNT      0x00100000      /* Is a VM accounted object */
+#define VM_NORESERVE    0x00200000      /* should the VM suppress accounting */
 #define VM_HUGETLB      0x00400000      /* Huge TLB Page VM */
 #define VM_NONLINEAR    0x00800000      /* Is non-linear (remap_file_pages) */
 #define VM_MAPPED_COPY  0x01000000      /* T if mapped copy of data (nommu mmap) */
@@ -166,12 +170,16 @@ struct vm_operations_struct {
         void (*open)(struct vm_area_struct * area);
         void (*close)(struct vm_area_struct * area);
         int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
-        unsigned long (*nopfn)(struct vm_area_struct *area,
-                        unsigned long address);
 
         /* notification that a previously read-only page is about to become
          * writable, if an error is returned it will cause a SIGBUS */
         int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);
+
+        /* called by access_process_vm when get_user_pages() fails, typically
+         * for use by special VMAs that can switch between memory and hardware
+         */
+        int (*access)(struct vm_area_struct *vma, unsigned long addr,
+                      void *buf, int len, int write);
 #ifdef CONFIG_NUMA
         /*
          * set_policy() op must add a reference to any non-NULL @new mempolicy
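The new ->access hook gives access_process_vm() (and so ptrace and /proc/<pid>/mem) a way into mappings where get_user_pages() fails, such as a VMA covering device registers. A hedged driver-side sketch, with hypothetical mydev_* names; generic_access_phys(), declared later in this diff, is used here on the assumption that it suits such an I/O mapping:

#include <linux/mm.h>

/* Hypothetical driver code, not part of this commit: route ->access
 * through generic_access_phys() so a debugger can read an mmap()ed
 * register window. */
static int mydev_vma_access(struct vm_area_struct *vma, unsigned long addr,
                            void *buf, int len, int write)
{
        return generic_access_phys(vma, addr, buf, len, write);
}

static struct vm_operations_struct mydev_vm_ops = {
        .access = mydev_vma_access,
        /* .fault, .open, .close as the driver requires */
};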
@@ -675,13 +683,6 @@ static inline int page_mapped(struct page *page)
 }
 
 /*
- * Error return values for the *_nopfn functions
- */
-#define NOPFN_SIGBUS    ((unsigned long) -1)
-#define NOPFN_OOM       ((unsigned long) -2)
-#define NOPFN_REFAULT   ((unsigned long) -3)
-
-/*
  * Different kinds of faults, as returned by handle_mm_fault().
  * Used to decide whether a process gets delivered SIGBUS or
  * just gets major/minor fault counters bumped up.
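With the NOPFN_* values gone, fault outcomes are reported only through the VM_FAULT_* bits that handle_mm_fault() returns. A sketch of the dispatch an arch fault handler of this era performs (VM_FAULT_OOM, VM_FAULT_SIGBUS, VM_FAULT_MAJOR and VM_FAULT_ERROR are defined just below the lines quoted; the goto labels are hypothetical):

/* sketch: inside an architecture's page-fault handler */
fault = handle_mm_fault(mm, vma, address, write);
if (unlikely(fault & VM_FAULT_ERROR)) {
        if (fault & VM_FAULT_OOM)
                goto out_of_memory;     /* hypothetical label */
        else if (fault & VM_FAULT_SIGBUS)
                goto do_sigbus;         /* hypothetical label */
        BUG();
}
if (fault & VM_FAULT_MAJOR)
        tsk->maj_flt++;                 /* the fault had to do I/O */
else
        tsk->min_flt++;                 /* page was already resident */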
@@ -743,6 +744,8 @@ struct zap_details {
 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
                         pte_t pte);
 
+int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
+                unsigned long size);
 unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
                 unsigned long size, struct zap_details *);
 unsigned long unmap_vmas(struct mmu_gather **tlb,
@@ -772,14 +775,14 @@ struct mm_walk {
 
 int walk_page_range(unsigned long addr, unsigned long end,
                 struct mm_walk *walk);
-void free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
+void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
                 unsigned long end, unsigned long floor, unsigned long ceiling);
-void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *start_vma,
-                unsigned long floor, unsigned long ceiling);
 int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
                         struct vm_area_struct *vma);
 void unmap_mapping_range(struct address_space *mapping,
                 loff_t const holebegin, loff_t const holelen, int even_cows);
+int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
+                void *buf, int len, int write);
 
 static inline void unmap_shared_mapping_range(struct address_space *mapping,
                 loff_t const holebegin, loff_t const holelen)
@@ -809,7 +812,6 @@ extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *
 
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
                 int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);
-void print_bad_pte(struct vm_area_struct *, pte_t, unsigned long);
 
 extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
 extern void do_invalidatepage(struct page *page, unsigned long offset);
@@ -833,6 +835,19 @@ extern int mprotect_fixup(struct vm_area_struct *vma,
         unsigned long end, unsigned long newflags);
 
 /*
+ * get_user_pages_fast provides equivalent functionality to get_user_pages,
+ * operating on current and current->mm (force=0 and doesn't return any vmas).
+ *
+ * get_user_pages_fast may take mmap_sem and page tables, so no assumptions
+ * can be made about locking. get_user_pages_fast is to be implemented in a
+ * way that is advantageous (vs get_user_pages()) when the user memory area is
+ * already faulted in and present in ptes. However if the pages have to be
+ * faulted in, it may turn out to be slightly slower.
+ */
+int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+                        struct page **pages);
+
+/*
  * A callback you can register to apply pressure to ageable caches.
  *
  * 'shrink' is passed a count 'nr_to_scan' and a 'gfpmask'. It should
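A hedged usage sketch of the pin/use/release pattern the get_user_pages_fast() comment implies; pin_user_byte is a hypothetical helper, while kmap(), set_page_dirty() and put_page() are standard, pre-existing kernel calls:

#include <linux/mm.h>
#include <linux/highmem.h>

/* Hypothetical helper, not from this commit: pin the page holding
 * uaddr for write access, zero one byte in it, then release the pin. */
static int pin_user_byte(unsigned long uaddr)
{
        struct page *page;
        char *kaddr;
        int ret;

        ret = get_user_pages_fast(uaddr & PAGE_MASK, 1, 1, &page);
        if (ret < 1)
                return ret < 0 ? ret : -EFAULT;

        kaddr = kmap(page);
        kaddr[uaddr & ~PAGE_MASK] = 0;
        kunmap(page);

        set_page_dirty(page);   /* we wrote through the kernel mapping */
        put_page(page);         /* drop the reference gup_fast took */
        return 0;
}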
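The shrinker comment beginning above (it continues past the lines quoted) describes the cache-pressure callback. A hedged registration sketch against the register_shrinker() API of this kernel generation; the my_cache_* names are hypothetical:

#include <linux/mm.h>

/* Hypothetical shrinker, not from this commit: when nr_to_scan is 0,
 * just report the cache size; otherwise evict up to nr_to_scan
 * entries and report what remains. */
static int my_cache_shrink(int nr_to_scan, gfp_t gfp_mask)
{
        if (nr_to_scan)
                my_cache_evict(nr_to_scan);     /* hypothetical helper */
        return my_cache_count();                /* hypothetical helper */
}

static struct shrinker my_shrinker = {
        .shrink = my_cache_shrink,
        .seeks  = DEFAULT_SEEKS,        /* relative cost of recreating entries */
};

/* register_shrinker(&my_shrinker) at init, unregister_shrinker() at exit */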
@@ -965,9 +980,8 @@ static inline void pgtable_page_dtor(struct page *page)
                 NULL: pte_offset_kernel(pmd, address))
 
 extern void free_area_init(unsigned long * zones_size);
-extern void free_area_init_node(int nid, pg_data_t *pgdat,
-        unsigned long * zones_size, unsigned long zone_start_pfn,
-        unsigned long *zholes_size);
+extern void free_area_init_node(int nid, unsigned long * zones_size,
+                unsigned long zone_start_pfn, unsigned long *zholes_size);
 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
 /*
  * With CONFIG_ARCH_POPULATES_NODE_MAP set, an architecture may initialise its
@@ -1009,7 +1023,6 @@ extern unsigned long absent_pages_in_range(unsigned long start_pfn,
 extern void get_pfn_range_for_nid(unsigned int nid,
                 unsigned long *start_pfn, unsigned long *end_pfn);
 extern unsigned long find_min_pfn_with_active_regions(void);
-extern unsigned long find_max_pfn_with_active_regions(void);
 extern void free_bootmem_with_active_regions(int nid,
                 unsigned long max_low_pfn);
 typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
@@ -1072,6 +1085,9 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
         unsigned long addr, unsigned long len, pgoff_t pgoff);
 extern void exit_mmap(struct mm_struct *);
 
+extern int mm_take_all_locks(struct mm_struct *mm);
+extern void mm_drop_all_locks(struct mm_struct *mm);
+
 #ifdef CONFIG_PROC_FS
 /* From fs/proc/base.c. callers must _not_ hold the mm's exe_file_lock */
 extern void added_exe_file_vma(struct mm_struct *mm);
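mm_take_all_locks() and mm_drop_all_locks() arrive with the mmu-notifier work pulled in through 'linus'. A hedged caller sketch; the contract assumed here (mmap_sem held for write, zero return on success) is taken from the mmu-notifier registration path, not from this header:

/* sketch: freeze every VMA in mm so a subsystem can attach to all of
 * them without racing against faults or vma changes */
down_write(&mm->mmap_sem);
if (!mm_take_all_locks(mm)) {           /* 0 means all locks acquired */
        /* ... walk mm->mmap and hook each vma ... */
        mm_drop_all_locks(mm);
}
up_write(&mm->mmap_sem);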