author		David Howells <dhowells@redhat.com>	2009-03-31 18:23:26 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-04-01 11:59:14 -0400
commit		33925b25d2c00a29664f1994ab350a9bff70f7a2
tree		fe1a0ef5cceba27664eae8f38f9e4e2a27bf1b36
parent		7ca43e7564679604d86e9ed834e7bbcffd8a4a3f
nommu: there is no mlock() for NOMMU, so don't provide the bits
The mlock() facility does not exist for NOMMU since all mappings are
effectively locked anyway, so we don't make the bits available when they're
not useful.

Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Greg Ungerer <gerg@snapgear.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Enrik Berkhan <Enrik.Berkhan@ge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
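A note on the mechanics the diff below leans on: when
CONFIG_HAVE_MLOCKED_PAGE_BIT is unset, the PAGEFLAG_FALSE(),
SETPAGEFLAG_NOOP() and TESTCLEARFLAG_FALSE() macros generate inline stub
accessors, so existing callers keep compiling without any #ifdefs of their
own. A minimal sketch of the resulting pattern (illustrative expansions,
not the kernel's exact macro bodies):

	struct page;	/* opaque here; really defined in <linux/mm_types.h> */

	#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
	/* the real accessors test, set and clear PG_mlocked in page->flags */
	#else
	static inline int PageMlocked(struct page *page)		{ return 0; }
	static inline void SetPageMlocked(struct page *page)		{ }
	static inline int TestClearPageMlocked(struct page *page)	{ return 0; }
	#endif

With the stubs in place, a test such as "if (PageMlocked(page))" is a
compile-time constant 0 on NOMMU and the dead branch is discarded entirely.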
-rw-r--r--	include/linux/page-flags.h	20
-rw-r--r--	mm/Kconfig			 8
-rw-r--r--	mm/internal.h			 8
3 files changed, 26 insertions(+), 10 deletions(-)
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 219a523ecdb0..61df1779b2a5 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -96,6 +96,8 @@ enum pageflags {
 	PG_swapbacked,		/* Page is backed by RAM/swap */
 #ifdef CONFIG_UNEVICTABLE_LRU
 	PG_unevictable,		/* Page is "unevictable" */
+#endif
+#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
 	PG_mlocked,		/* Page is vma mlocked */
 #endif
 #ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
@@ -234,20 +236,20 @@ PAGEFLAG_FALSE(SwapCache)
 #ifdef CONFIG_UNEVICTABLE_LRU
 PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)
 	TESTCLEARFLAG(Unevictable, unevictable)
+#else
+PAGEFLAG_FALSE(Unevictable) TESTCLEARFLAG_FALSE(Unevictable)
+	SETPAGEFLAG_NOOP(Unevictable) CLEARPAGEFLAG_NOOP(Unevictable)
+	__CLEARPAGEFLAG_NOOP(Unevictable)
+#endif
 
+#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
 #define MLOCK_PAGES 1
 PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked)
 	TESTSCFLAG(Mlocked, mlocked)
-
 #else
-
 #define MLOCK_PAGES 0
 PAGEFLAG_FALSE(Mlocked)
 	SETPAGEFLAG_NOOP(Mlocked) TESTCLEARFLAG_FALSE(Mlocked)
-
-PAGEFLAG_FALSE(Unevictable) TESTCLEARFLAG_FALSE(Unevictable)
-	SETPAGEFLAG_NOOP(Unevictable) CLEARPAGEFLAG_NOOP(Unevictable)
-	__CLEARPAGEFLAG_NOOP(Unevictable)
 #endif
 
 #ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
@@ -367,9 +369,13 @@ static inline void __ClearPageTail(struct page *page)
 
 #ifdef CONFIG_UNEVICTABLE_LRU
 #define __PG_UNEVICTABLE	(1 << PG_unevictable)
-#define __PG_MLOCKED		(1 << PG_mlocked)
 #else
 #define __PG_UNEVICTABLE	0
+#endif
+
+#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
+#define __PG_MLOCKED		(1 << PG_mlocked)
+#else
 #define __PG_MLOCKED		0
 #endif
 
diff --git a/mm/Kconfig b/mm/Kconfig
index a5b77811fdf2..8c895973dfba 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -214,5 +214,13 @@ config UNEVICTABLE_LRU
 	  will use one page flag and increase the code size a little,
 	  say Y unless you know what you are doing.
 
+config HAVE_MLOCK
+	bool
+	default y if MMU=y
+
+config HAVE_MLOCKED_PAGE_BIT
+	bool
+	default y if HAVE_MLOCK=y && UNEVICTABLE_LRU=y
+
 config MMU_NOTIFIER
 	bool
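How the two new symbols resolve follows directly from the "default y if"
lines above (assuming nothing else selects or overrides them):

	MMU	UNEVICTABLE_LRU		HAVE_MLOCK	HAVE_MLOCKED_PAGE_BIT
	 y	       y		     y			y
	 y	       n		     y			n
	 n	      any		     n			n

A NOMMU kernel therefore gets neither symbol, so both the mlock helpers and
the PG_mlocked page bit drop out of the build.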
diff --git a/mm/internal.h b/mm/internal.h
index 478223b73a2a..987bb03fbdd8 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -63,6 +63,7 @@ static inline unsigned long page_order(struct page *page)
 	return page_private(page);
 }
 
+#ifdef CONFIG_HAVE_MLOCK
 extern long mlock_vma_pages_range(struct vm_area_struct *vma,
 			unsigned long start, unsigned long end);
 extern void munlock_vma_pages_range(struct vm_area_struct *vma,
@@ -71,6 +72,7 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
 {
 	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
 }
+#endif
 
 #ifdef CONFIG_UNEVICTABLE_LRU
 /*
@@ -90,7 +92,7 @@ static inline void unevictable_migrate_page(struct page *new, struct page *old)
 }
 #endif
 
-#ifdef CONFIG_UNEVICTABLE_LRU
+#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
 /*
  * Called only in fault path via page_evictable() for a new page
  * to determine if it's being mapped into a LOCKED vma.
@@ -165,7 +167,7 @@ static inline void free_page_mlock(struct page *page)
 	}
 }
 
-#else /* CONFIG_UNEVICTABLE_LRU */
+#else /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
 static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
 {
 	return 0;
@@ -175,7 +177,7 @@ static inline void mlock_vma_page(struct page *page) { }
 static inline void mlock_migrate_page(struct page *new, struct page *old) { }
 static inline void free_page_mlock(struct page *page) { }
 
-#endif /* CONFIG_UNEVICTABLE_LRU */
+#endif /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
 
 /*
  * Return the mem_map entry representing the 'offset' subpage within
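For illustration, a hypothetical caller (the function and its name are
invented for this sketch; the real fault-path user is page_evictable(), per
the comment above) showing why the stubbed variants matter:

	static void example_fault_path(struct vm_area_struct *vma,
				       struct page *page)
	{
		/* constant 0 when CONFIG_HAVE_MLOCKED_PAGE_BIT is unset ... */
		if (is_mlocked_vma(vma, page))
			return;	/* ... so the mlock bookkeeping folds away */
		/* normal LRU handling continues here */
	}

Because every configuration provides some definition of is_mlocked_vma(),
callers compile unchanged whether or not the PG_mlocked bit exists.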