Diffstat (limited to 'include/linux/mm.h')
 -rw-r--r--  include/linux/mm.h | 152
 1 file changed, 96 insertions(+), 56 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 097b3a3c693d..5c1fb0a2e806 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -157,7 +157,7 @@ extern unsigned int kobjsize(const void *objp);
 
 #define VM_DONTCOPY	0x00020000	/* Do not copy this vma on fork */
 #define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
-#define VM_RESERVED	0x00080000	/* Don't unmap it from swap_out */
+#define VM_RESERVED	0x00080000	/* Pages managed in a special way */
 #define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
 #define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
 #define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
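
Note: the new comment reflects that swap_out() is long gone; VM_RESERVED now broadly marks pages the VM must handle specially, typically driver mappings. As a hedged illustration only (pfn is hypothetical, whatever physical range the driver exports), a driver ->mmap handler of this era would set the flag before remapping:

	vma->vm_flags |= VM_IO | VM_RESERVED;	/* VM must leave these pages alone */
	if (remap_pfn_range(vma, vma->vm_start, pfn,
			    vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;
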
@@ -226,13 +226,18 @@ struct page {
 					 * to show when page is mapped
 					 * & limit reverse map searches.
 					 */
-	unsigned long private;		/* Mapping-private opaque data:
+	union {
+		unsigned long private;	/* Mapping-private opaque data:
 					 * usually used for buffer_heads
 					 * if PagePrivate set; used for
 					 * swp_entry_t if PageSwapCache
 					 * When page is free, this indicates
 					 * order in the buddy system.
 					 */
+#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+		spinlock_t ptl;
+#endif
+	} u;
 	struct address_space *mapping;	/* If low bit clear, points to
 					 * inode address_space, or NULL.
 					 * If page mapped as anonymous
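
Note: the union is safe because a page-table page is never also a pagecache or swapcache page, so its private word is free to hold the split page-table lock. Conceptually (a sketch using only names added elsewhere in this diff):

	struct page *ptpage = pmd_page(*pmd);	/* the page table's struct page */
	spin_lock(&ptpage->u.ptl);		/* i.e. __pte_lockptr(ptpage) */
	/* ... examine or modify ptes held by this one page table ... */
	spin_unlock(&ptpage->u.ptl);
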
@@ -260,6 +265,9 @@ struct page {
 #endif /* WANT_PAGE_VIRTUAL */
 };
 
+#define page_private(page)		((page)->u.private)
+#define set_page_private(page, v)	((page)->u.private = (v))
+
 /*
  * FIXME: take this include out, include page-flags.h in
  * files which need it (119 of them)
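
Note: direct page->private users are converted to these accessors, which is what lets the field move inside the union without touching every call site again. Illustrative before/after with a buffer_head pointer (bh is hypothetical):

	/* before */
	page->private = (unsigned long)bh;
	/* after */
	set_page_private(page, (unsigned long)bh);
	bh = (struct buffer_head *)page_private(page);
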
@@ -311,17 +319,17 @@ extern void FASTCALL(__page_cache_release(struct page *));
 
 #ifdef CONFIG_HUGETLB_PAGE
 
-static inline int page_count(struct page *p)
+static inline int page_count(struct page *page)
 {
-	if (PageCompound(p))
-		p = (struct page *)p->private;
-	return atomic_read(&(p)->_count) + 1;
+	if (PageCompound(page))
+		page = (struct page *)page_private(page);
+	return atomic_read(&page->_count) + 1;
 }
 
 static inline void get_page(struct page *page)
 {
 	if (unlikely(PageCompound(page)))
-		page = (struct page *)page->private;
+		page = (struct page *)page_private(page);
 	atomic_inc(&page->_count);
 }
 
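
Note: in a compound (hugetlb) page, each tail page's private word points back at the head page, where the single reference count lives; hence the redirection before touching _count. Sketch of the layout (the page allocator sets this up itself for __GFP_COMP allocations; spelled out here only to illustrate):

	struct page *head = alloc_pages(GFP_KERNEL | __GFP_COMP, 2);	/* order-2 block */
	struct page *tail = head + 1;
	/* done by the allocator, not by callers: */
	set_page_private(tail, (unsigned long)head);

	get_page(tail);		/* actually increments head->_count */
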
@@ -338,7 +346,7 @@ static inline void get_page(struct page *page)
 
 static inline void put_page(struct page *page)
 {
-	if (!PageReserved(page) && put_page_testzero(page))
+	if (put_page_testzero(page))
 		__page_cache_release(page);
 }
 
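
Note: with the PageReserved test gone, put_page relies purely on the refcount, so reserved pages must simply never be handed here with a reference the caller doesn't own. _count is biased by -1 (a page with one user has _count == 0), so the drop-to-free test amounts to "did the counter go negative" -- roughly this, as a sketch of the semantics rather than the header's exact text:

	if (atomic_add_negative(-1, &page->_count))
		__page_cache_release(page);	/* we dropped the last reference */
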
@@ -587,7 +595,7 @@ static inline int PageAnon(struct page *page)
 static inline pgoff_t page_index(struct page *page)
 {
 	if (unlikely(PageSwapCache(page)))
-		return page->private;
+		return page_private(page);
 	return page->index;
 }
 
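
Note: for a swapcache page the private word holds the swp_entry_t value, which doubles as the page's index in the swapper address space. Illustrative decoding (swp_type/swp_offset come from swapops.h):

	swp_entry_t entry;

	entry.val = page_private(page);
	/* swp_type(entry) selects the swap area,
	 * swp_offset(entry) the slot within it */
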
@@ -682,7 +690,7 @@ struct zap_details {
 
 unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size, struct zap_details *);
-unsigned long unmap_vmas(struct mmu_gather **tlb, struct mm_struct *mm,
+unsigned long unmap_vmas(struct mmu_gather **tlb,
 		struct vm_area_struct *start_vma, unsigned long start_addr,
 		unsigned long end_addr, unsigned long *nr_accounted,
 		struct zap_details *);
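
Note: the mm argument was redundant since every vma records its mm. Presumably (the matching mm/memory.c change is outside this header-only diff) the body now derives it as:

	struct mm_struct *mm = start_vma->vm_mm;
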
@@ -704,10 +712,6 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
 }
 
 extern int vmtruncate(struct inode * inode, loff_t offset);
-extern pud_t *FASTCALL(__pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address));
-extern pmd_t *FASTCALL(__pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address));
-extern pte_t *FASTCALL(pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsigned long address));
-extern pte_t *FASTCALL(pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address));
 extern int install_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, struct page *page, pgprot_t prot);
 extern int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long pgoff, pgprot_t prot);
 extern int __handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma, unsigned long address, int write_access);
@@ -723,6 +727,7 @@ void install_arg_page(struct vm_area_struct *, struct page *, unsigned long);
 
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
 		int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);
+void print_bad_pte(struct vm_area_struct *, pte_t, unsigned long);
 
 int __set_page_dirty_buffers(struct page *page);
 int __set_page_dirty_nobuffers(struct page *page);
@@ -747,7 +752,7 @@ extern unsigned long do_mremap(unsigned long addr,
  * The callback will be passed nr_to_scan == 0 when the VM is querying the
  * cache size, so a fastpath for that case is appropriate.
  */
-typedef int (*shrinker_t)(int nr_to_scan, unsigned int gfp_mask);
+typedef int (*shrinker_t)(int nr_to_scan, gfp_t gfp_mask);
 
 /*
  * Add an aging callback.  The int is the number of 'seeks' it takes
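
Note: typing the mask as gfp_t lets sparse catch confusion between allocation flags and plain integers. A minimal shrinker under the new signature (all my_* names hypothetical):

	static int my_cache_shrink(int nr_to_scan, gfp_t gfp_mask)
	{
		if (nr_to_scan == 0)
			return my_cache_count;	/* fastpath: just report size */
		if (!(gfp_mask & __GFP_FS))
			return -1;		/* cannot recurse into the FS */
		/* ... free up to nr_to_scan objects, then report what's left */
		return my_cache_count;
	}

	/* registration at init, removal at teardown: */
	struct shrinker *s = set_shrinker(DEFAULT_SEEKS, my_cache_shrink);
	/* ... */
	remove_shrinker(s);
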
@@ -759,38 +764,83 @@ struct shrinker;
 extern struct shrinker *set_shrinker(int, shrinker_t);
 extern void remove_shrinker(struct shrinker *shrinker);
 
-/*
- * On a two-level or three-level page table, this ends up being trivial. Thus
- * the inlining and the symmetry break with pte_alloc_map() that does all
- * of this out-of-line.
- */
+int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
+int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
+int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
+int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
+
 /*
  * The following ifdef needed to get the 4level-fixup.h header to work.
  * Remove it when 4level-fixup.h has been removed.
  */
-#ifdef CONFIG_MMU
-#ifndef __ARCH_HAS_4LEVEL_HACK
+#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
 static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
 {
-	if (pgd_none(*pgd))
-		return __pud_alloc(mm, pgd, address);
-	return pud_offset(pgd, address);
+	return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))?
+		NULL: pud_offset(pgd, address);
 }
 
 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
 {
-	if (pud_none(*pud))
-		return __pmd_alloc(mm, pud, address);
-	return pmd_offset(pud, address);
+	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
+		NULL: pmd_offset(pud, address);
 }
-#endif
-#endif /* CONFIG_MMU */
+#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
+
+#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+/*
+ * We tuck a spinlock to guard each pagetable page into its struct page,
+ * at page->private, with BUILD_BUG_ON to make sure that this will not
+ * overflow into the next struct page (as it might with DEBUG_SPINLOCK).
+ * When freeing, reset page->mapping so free_pages_check won't complain.
+ */
+#define __pte_lockptr(page)	&((page)->u.ptl)
+#define pte_lock_init(_page)	do {					\
+	spin_lock_init(__pte_lockptr(_page));				\
+} while (0)
+#define pte_lock_deinit(page)	((page)->mapping = NULL)
+#define pte_lockptr(mm, pmd)	({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
+#else
+/*
+ * We use mm->page_table_lock to guard all pagetable pages of the mm.
+ */
+#define pte_lock_init(page)	do {} while (0)
+#define pte_lock_deinit(page)	do {} while (0)
+#define pte_lockptr(mm, pmd)	({(void)(pmd); &(mm)->page_table_lock;})
+#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+
+#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
+({							\
+	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
+	pte_t *__pte = pte_offset_map(pmd, address);	\
+	*(ptlp) = __ptl;				\
+	spin_lock(__ptl);				\
+	__pte;						\
+})
+
+#define pte_unmap_unlock(pte, ptl)	do {		\
+	spin_unlock(ptl);				\
+	pte_unmap(pte);					\
+} while (0)
+
+#define pte_alloc_map(mm, pmd, address)			\
+	((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address))? \
+		NULL: pte_offset_map(pmd, address))
+
+#define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
+	((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address))? \
+		NULL: pte_offset_map_lock(mm, pmd, address, ptlp))
+
+#define pte_alloc_kernel(pmd, address)			\
+	((unlikely(!pmd_present(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
+		NULL: pte_offset_kernel(pmd, address))
 
 extern void free_area_init(unsigned long * zones_size);
 extern void free_area_init_node(int nid, pg_data_t *pgdat,
 	unsigned long * zones_size, unsigned long zone_start_pfn,
 	unsigned long *zholes_size);
 extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long);
+extern void setup_per_zone_pages_min(void);
 extern void mem_init(void);
 extern void show_mem(void);
 extern void si_meminfo(struct sysinfo * val);
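
Note: these helpers are the heart of the conversion. Code that used to take mm->page_table_lock around a pte walk now brackets the walk with map+lock and unlock+unmap, and ends up holding either the per-page-table lock or the old mm-wide lock depending on CONFIG_SPLIT_PTLOCK_CPUS. The caller pattern (a representative sketch, not a specific call site):

	pte_t *pte;
	spinlock_t *ptl;

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	/* ... inspect or modify *pte; only this page table's lock is
	 * held, so faults on other page tables proceed in parallel ... */
	pte_unmap_unlock(pte, ptl);

For the allocating variants, NULL means the page-table allocation failed:

	pte = pte_alloc_map_lock(mm, pmd, address, &ptl);
	if (!pte)
		return -ENOMEM;
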
@@ -834,6 +884,7 @@ extern int split_vma(struct mm_struct *,
 extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
 extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
 	struct rb_node **, struct rb_node *);
+extern void unlink_file_vma(struct vm_area_struct *);
 extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
 	unsigned long addr, unsigned long len, pgoff_t pgoff);
 extern void exit_mmap(struct mm_struct *);
@@ -894,7 +945,8 @@ void handle_ra_miss(struct address_space *mapping,
 unsigned long max_sane_readahead(unsigned long nr);
 
 /* Do stack extension */
-extern int expand_stack(struct vm_area_struct * vma, unsigned long address);
+extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
+extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
 
 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
 extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
@@ -917,40 +969,28 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
 	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 }
 
-extern struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr);
+struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
+struct page *vmalloc_to_page(void *addr);
+unsigned long vmalloc_to_pfn(void *addr);
+int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
+			unsigned long pfn, unsigned long size, pgprot_t);
 
-extern struct page * vmalloc_to_page(void *addr);
-extern unsigned long vmalloc_to_pfn(void *addr);
-extern struct page * follow_page(struct mm_struct *mm, unsigned long address,
-			int write);
-extern int check_user_page_readable(struct mm_struct *mm, unsigned long address);
-int remap_pfn_range(struct vm_area_struct *, unsigned long,
-		unsigned long, unsigned long, pgprot_t);
+struct page *follow_page(struct mm_struct *, unsigned long address,
+			unsigned int foll_flags);
+#define FOLL_WRITE	0x01	/* check pte is writable */
+#define FOLL_TOUCH	0x02	/* mark page accessed */
+#define FOLL_GET	0x04	/* do get_page on page */
+#define FOLL_ANON	0x08	/* give ZERO_PAGE if no pgtable */
 
 #ifdef CONFIG_PROC_FS
-void __vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
+void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
 #else
-static inline void __vm_stat_account(struct mm_struct *mm,
+static inline void vm_stat_account(struct mm_struct *mm,
 	unsigned long flags, struct file *file, long pages)
 {
 }
 #endif /* CONFIG_PROC_FS */
 
-static inline void vm_stat_account(struct vm_area_struct *vma)
-{
-	__vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file,
-							vma_pages(vma));
-}
-
-static inline void vm_stat_unaccount(struct vm_area_struct *vma)
-{
-	__vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file,
-							-vma_pages(vma));
-}
-
-/* update per process rss and vm hiwater data */
-extern void update_mem_hiwater(struct task_struct *tsk);
-
 #ifndef CONFIG_DEBUG_PAGEALLOC
 static inline void
 kernel_map_pages(struct page *page, int numpages, int enable)
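
Note: follow_page's old int write argument becomes a flag word, so one call can now state everything it wants done (write check, accessed bit, taking a reference). A hedged sketch of a get_user_pages-style caller:

	unsigned int foll_flags = FOLL_TOUCH;
	struct page *page;

	if (write)
		foll_flags |= FOLL_WRITE;
	if (pages)
		foll_flags |= FOLL_GET;
	page = follow_page(mm, address, foll_flags);
	if (page && (foll_flags & FOLL_GET)) {
		/* ... use page, then drop the reference we asked for */
		put_page(page);
	}
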