author	Hugh Dickins <hugh.dickins@tiscali.co.uk>	2009-12-14 20:58:59 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-12-15 11:53:17 -0500
commit	af8e3354b4bbd1ee5a3a55d11a5e1fe37e77f0ba (patch)
tree	8dc0ece80878d00409d4662c5fd1e28cd7fbbdd8 /mm/internal.h
parent	53f79acb6ecb648afd63e0f13deba167f1a934df (diff)
mm: CONFIG_MMU for PG_mlocked
Remove three degrees of obfuscation, left over from when we had
CONFIG_UNEVICTABLE_LRU. MLOCK_PAGES is CONFIG_HAVE_MLOCKED_PAGE_BIT is
CONFIG_HAVE_MLOCK is CONFIG_MMU. rmap.o (and memory-failure.o) are only
built when CONFIG_MMU, so don't need such conditions at all.
Somehow, I feel no compulsion to remove the CONFIG_HAVE_MLOCK* lines from
169 defconfigs: leave those to evolve in due course.
Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Izik Eidus <ieidus@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Nick Piggin <npiggin@suse.de>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/internal.h')
-rw-r--r--	mm/internal.h	26

1 file changed, 12 insertions(+), 14 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index 22ec8d2b0fb8..cb7d92d0a46d 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -63,17 +63,6 @@ static inline unsigned long page_order(struct page *page)
 	return page_private(page);
 }
 
-#ifdef CONFIG_HAVE_MLOCK
-extern long mlock_vma_pages_range(struct vm_area_struct *vma,
-			unsigned long start, unsigned long end);
-extern void munlock_vma_pages_range(struct vm_area_struct *vma,
-			unsigned long start, unsigned long end);
-static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
-{
-	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
-}
-#endif
-
 /*
  * unevictable_migrate_page() called only from migrate_page_copy() to
  * migrate unevictable flag to new page.
@@ -86,7 +75,16 @@ static inline void unevictable_migrate_page(struct page *new, struct page *old)
 		SetPageUnevictable(new);
 }
 
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
+#ifdef CONFIG_MMU
+extern long mlock_vma_pages_range(struct vm_area_struct *vma,
+			unsigned long start, unsigned long end);
+extern void munlock_vma_pages_range(struct vm_area_struct *vma,
+			unsigned long start, unsigned long end);
+static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
+{
+	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
+}
+
 /*
  * Called only in fault path via page_evictable() for a new page
  * to determine if it's being mapped into a LOCKED vma.
@@ -144,7 +142,7 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
 	}
 }
 
-#else /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
+#else /* !CONFIG_MMU */
 static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
 {
 	return 0;
@@ -153,7 +151,7 @@ static inline void clear_page_mlock(struct page *page) { }
 static inline void mlock_vma_page(struct page *page) { }
 static inline void mlock_migrate_page(struct page *new, struct page *old) { }
 
-#endif /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
+#endif /* !CONFIG_MMU */
 
 /*
  * Return the mem_map entry representing the 'offset' subpage within
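
For readers unfamiliar with the symbols named in the commit message, the sketch below models the "three degrees of obfuscation" being collapsed. It is an illustrative, self-contained C program, not code from this patch: the real CONFIG_HAVE_MLOCK and CONFIG_HAVE_MLOCKED_PAGE_BIT were Kconfig symbols (and MLOCK_PAGES a helper macro in the rmap code), approximated here with plain #defines.

/*
 * Illustrative sketch only -- not part of this diff.  It models how
 * CONFIG_HAVE_MLOCK, CONFIG_HAVE_MLOCKED_PAGE_BIT and MLOCK_PAGES were,
 * in effect, synonyms for CONFIG_MMU before this commit.
 */
#include <stdio.h>

#define CONFIG_MMU 1			/* stand-in for the Kconfig symbol */

#ifdef CONFIG_MMU
#define CONFIG_HAVE_MLOCK 1		/* was roughly: Kconfig "default y if MMU" */
#endif

#ifdef CONFIG_HAVE_MLOCK
#define CONFIG_HAVE_MLOCKED_PAGE_BIT 1	/* was roughly: "default y if HAVE_MLOCK" */
#define MLOCK_PAGES 1			/* was roughly: helper macro in the rmap code */
#else
#define MLOCK_PAGES 0
#endif

int main(void)
{
	/* After this commit, all three tests collapse to #ifdef CONFIG_MMU. */
	printf("MLOCK_PAGES = %d\n", MLOCK_PAGES);
	return 0;
}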