author    | Jiri Kosina <jkosina@suse.cz> | 2011-09-15 09:08:05 -0400
committer | Jiri Kosina <jkosina@suse.cz> | 2011-09-15 09:08:18 -0400
commit    | e060c38434b2caa78efe7cedaff4191040b65a15 (patch)
tree      | 407361230bf6733f63d8e788e4b5e6566ee04818 /include/linux/mm.h
parent    | 10e4ac572eeffe5317019bd7330b6058a400dfc2 (diff)
parent    | cc39c6a9bbdebfcf1a7dee64d83bf302bc38d941 (diff)
Merge branch 'master' into for-next
Fast-forward merge with Linus to be able to merge patches
based on a more recent version of the tree.
Diffstat (limited to 'include/linux/mm.h')
-rw-r--r-- | include/linux/mm.h | 19
1 file changed, 14 insertions, 5 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 3172a1c0f08e..7438071b44aa 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -685,7 +685,7 @@ static inline void set_page_section(struct page *page, unsigned long section)
 	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
 }
 
-static inline unsigned long page_to_section(struct page *page)
+static inline unsigned long page_to_section(const struct page *page)
 {
 	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
 }
@@ -720,7 +720,7 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
 
 static __always_inline void *lowmem_page_address(const struct page *page)
 {
-	return __va(PFN_PHYS(page_to_pfn((struct page *)page)));
+	return __va(PFN_PHYS(page_to_pfn(page)));
 }
 
 #if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
@@ -737,7 +737,7 @@ static __always_inline void *lowmem_page_address(const struct page *page)
 #endif
 
 #if defined(HASHED_PAGE_VIRTUAL)
-void *page_address(struct page *page);
+void *page_address(const struct page *page);
 void set_page_address(struct page *page, void *virtual);
 void page_address_init(void);
 #endif
@@ -962,6 +962,8 @@ int invalidate_inode_page(struct page *page);
 #ifdef CONFIG_MMU
 extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long address, unsigned int flags);
+extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
+			unsigned long address, unsigned int fault_flags);
 #else
 static inline int handle_mm_fault(struct mm_struct *mm,
 			struct vm_area_struct *vma, unsigned long address,
@@ -971,6 +973,14 @@ static inline int handle_mm_fault(struct mm_struct *mm,
 	BUG();
 	return VM_FAULT_SIGBUS;
 }
+static inline int fixup_user_fault(struct task_struct *tsk,
+		struct mm_struct *mm, unsigned long address,
+		unsigned int fault_flags)
+{
+	/* should never happen if there's no MMU */
+	BUG();
+	return -EFAULT;
+}
 #endif
 
 extern int make_pages_present(unsigned long addr, unsigned long end);
@@ -988,8 +998,6 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 			struct page **pages);
 struct page *get_dump_page(unsigned long addr);
-extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
-			unsigned long address, unsigned int fault_flags);
 
 extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
 extern void do_invalidatepage(struct page *page, unsigned long offset);
@@ -1600,6 +1608,7 @@ enum mf_flags {
 };
 extern void memory_failure(unsigned long pfn, int trapno);
 extern int __memory_failure(unsigned long pfn, int trapno, int flags);
+extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
 extern int unpoison_memory(unsigned long pfn);
 extern int sysctl_memory_failure_early_kill;
 extern int sysctl_memory_failure_recovery;
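Beyond the const-qualification hunks, the merge brings in a proper home for fixup_user_fault(): its declaration moves under CONFIG_MMU and a BUG()-ing -EFAULT stub is added for no-MMU builds. A minimal sketch of how a caller typically uses it, modelled on the futex fault-in pattern of that kernel era (the wrapper name example_fault_in_writeable is illustrative, not part of this commit):

```c
#include <linux/mm.h>
#include <linux/sched.h>

/*
 * Fault in a user page for writing so a later atomic access can
 * succeed.  Sketch modelled on kernel/futex.c's fault_in_user_writeable();
 * the function name here is only for illustration.
 */
static int example_fault_in_writeable(u32 __user *uaddr)
{
	struct mm_struct *mm = current->mm;
	int ret;

	/* fixup_user_fault() expects mmap_sem held for read */
	down_read(&mm->mmap_sem);
	ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
			       FAULT_FLAG_WRITE);
	up_read(&mm->mmap_sem);

	return ret < 0 ? ret : 0;
}
```

On no-MMU kernels the new inline stub makes such a call path fail loudly with -EFAULT instead of leaving the symbol undefined.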
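The other new declaration, memory_failure_queue(), records a corrupted page frame for deferred hwpoison handling. A hypothetical fragment of a reporting path, assuming the caller cannot run the full memory_failure() recovery directly (the surrounding function is an assumption for illustration, not code from this tree):

```c
#include <linux/mm.h>

/*
 * Hypothetical reporting path: a hardware error handler has decoded a
 * physical address into a page frame number and hands it off for later
 * processing via memory_failure_queue().
 */
static void example_report_poisoned_page(unsigned long pfn)
{
	memory_failure_queue(pfn, 0 /* trapno */, 0 /* flags */);
}
```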