about summary refs log tree commit diff stats
path: root/include/linux
diff options
context:
space:
mode:
authorMel Gorman <mel@csn.ul.ie>2007-07-17 07:03:05 -0400
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-07-17 13:22:59 -0400
commit769848c03895b63e5662eb7e4ec8c4866f7d0183 (patch)
tree8911c7c312c8b8b172795fa2874c8162e1d3d15a /include/linux
parenta32ea1e1f925399e0d81ca3f7394a44a6dafa12c (diff)
Add __GFP_MOVABLE for callers to flag allocations from high memory that may be migrated
It is often known at allocation time whether a page may be migrated or not. This patch adds a flag called __GFP_MOVABLE and a new mask called GFP_HIGH_MOVABLE. Allocations using the __GFP_MOVABLE can be either migrated using the page migration mechanism or reclaimed by syncing with backing storage and discarding. An API function very similar to alloc_zeroed_user_highpage() is added for __GFP_MOVABLE allocations called alloc_zeroed_user_highpage_movable(). The flags used by alloc_zeroed_user_highpage() are not changed because it would change the semantics of an existing API. After this patch is applied there are no in-kernel users of alloc_zeroed_user_highpage() so it probably should be marked deprecated if this patch is merged. Note that this patch includes a minor cleanup to the use of __GFP_ZERO in shmem.c to keep all flag modifications to inode->mapping in the shmem_dir_alloc() helper function. This clean-up suggestion is courtesy of Hugh Dickins. Additional credit goes to Christoph Lameter and Linus Torvalds for shaping the concept. Credit to Hugh Dickins for catching issues with shmem swap vector and ramfs allocations. [akpm@linux-foundation.org: build fix] [hugh@veritas.com: __GFP_ZERO cleanup] Signed-off-by: Mel Gorman <mel@csn.ul.ie> Cc: Andy Whitcroft <apw@shadowen.org> Cc: Christoph Lameter <clameter@sgi.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/gfp.h16
-rw-r--r--include/linux/highmem.h51
2 files changed, 64 insertions, 3 deletions
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 0d2ef0b082a6..e5882fe49f83 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -30,6 +30,9 @@ struct vm_area_struct;
30 * cannot handle allocation failures. 30 * cannot handle allocation failures.
31 * 31 *
32 * __GFP_NORETRY: The VM implementation must not retry indefinitely. 32 * __GFP_NORETRY: The VM implementation must not retry indefinitely.
33 *
34 * __GFP_MOVABLE: Flag that this page will be movable by the page migration
35 * mechanism or reclaimed
33 */ 36 */
34#define __GFP_WAIT ((__force gfp_t)0x10u) /* Can wait and reschedule? */ 37#define __GFP_WAIT ((__force gfp_t)0x10u) /* Can wait and reschedule? */
35#define __GFP_HIGH ((__force gfp_t)0x20u) /* Should access emergency pools? */ 38#define __GFP_HIGH ((__force gfp_t)0x20u) /* Should access emergency pools? */
@@ -45,6 +48,7 @@ struct vm_area_struct;
45#define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */ 48#define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */
46#define __GFP_HARDWALL ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */ 49#define __GFP_HARDWALL ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */
47#define __GFP_THISNODE ((__force gfp_t)0x40000u)/* No fallback, no policies */ 50#define __GFP_THISNODE ((__force gfp_t)0x40000u)/* No fallback, no policies */
51#define __GFP_MOVABLE ((__force gfp_t)0x80000u) /* Page is movable */
48 52
49#define __GFP_BITS_SHIFT 20 /* Room for 20 __GFP_FOO bits */ 53#define __GFP_BITS_SHIFT 20 /* Room for 20 __GFP_FOO bits */
50#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) 54#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
@@ -53,7 +57,8 @@ struct vm_area_struct;
53#define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \ 57#define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \
54 __GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \ 58 __GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \
55 __GFP_NOFAIL|__GFP_NORETRY|__GFP_COMP| \ 59 __GFP_NOFAIL|__GFP_NORETRY|__GFP_COMP| \
56 __GFP_NOMEMALLOC|__GFP_HARDWALL|__GFP_THISNODE) 60 __GFP_NOMEMALLOC|__GFP_HARDWALL|__GFP_THISNODE| \
61 __GFP_MOVABLE)
57 62
58/* This equals 0, but use constants in case they ever change */ 63/* This equals 0, but use constants in case they ever change */
59#define GFP_NOWAIT (GFP_ATOMIC & ~__GFP_HIGH) 64#define GFP_NOWAIT (GFP_ATOMIC & ~__GFP_HIGH)
@@ -65,6 +70,15 @@ struct vm_area_struct;
65#define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL) 70#define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
66#define GFP_HIGHUSER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \ 71#define GFP_HIGHUSER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
67 __GFP_HIGHMEM) 72 __GFP_HIGHMEM)
73#define GFP_HIGHUSER_MOVABLE (__GFP_WAIT | __GFP_IO | __GFP_FS | \
74 __GFP_HARDWALL | __GFP_HIGHMEM | \
75 __GFP_MOVABLE)
76#define GFP_NOFS_PAGECACHE (__GFP_WAIT | __GFP_IO | __GFP_MOVABLE)
77#define GFP_USER_PAGECACHE (__GFP_WAIT | __GFP_IO | __GFP_FS | \
78 __GFP_HARDWALL | __GFP_MOVABLE)
79#define GFP_HIGHUSER_PAGECACHE (__GFP_WAIT | __GFP_IO | __GFP_FS | \
80 __GFP_HARDWALL | __GFP_HIGHMEM | \
81 __GFP_MOVABLE)
68 82
69#ifdef CONFIG_NUMA 83#ifdef CONFIG_NUMA
70#define GFP_THISNODE (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY) 84#define GFP_THISNODE (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 98e2cce996a4..12c5e4e3135a 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -73,10 +73,27 @@ static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
73} 73}
74 74
75#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE 75#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
76/**
77 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
78 * @movableflags: The GFP flags related to the pages future ability to move like __GFP_MOVABLE
79 * @vma: The VMA the page is to be allocated for
80 * @vaddr: The virtual address the page will be inserted into
81 *
82 * This function will allocate a page for a VMA but the caller is expected
83 * to specify via movableflags whether the page will be movable in the
84 * future or not
85 *
86 * An architecture may override this function by defining
87 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing their own
88 * implementation.
89 */
76static inline struct page * 90static inline struct page *
77alloc_zeroed_user_highpage(struct vm_area_struct *vma, unsigned long vaddr) 91__alloc_zeroed_user_highpage(gfp_t movableflags,
92 struct vm_area_struct *vma,
93 unsigned long vaddr)
78{ 94{
79 struct page *page = alloc_page_vma(GFP_HIGHUSER, vma, vaddr); 95 struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
96 vma, vaddr);
80 97
81 if (page) 98 if (page)
82 clear_user_highpage(page, vaddr); 99 clear_user_highpage(page, vaddr);
@@ -85,6 +102,36 @@ alloc_zeroed_user_highpage(struct vm_area_struct *vma, unsigned long vaddr)
85} 102}
86#endif 103#endif
87 104
105/**
106 * alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA
107 * @vma: The VMA the page is to be allocated for
108 * @vaddr: The virtual address the page will be inserted into
109 *
110 * This function will allocate a page for a VMA that the caller knows will
111 * not be able to move in the future using move_pages() or reclaim. If it
112 * is known that the page can move, use alloc_zeroed_user_highpage_movable
113 */
114static inline struct page *
115alloc_zeroed_user_highpage(struct vm_area_struct *vma, unsigned long vaddr)
116{
117 return __alloc_zeroed_user_highpage(0, vma, vaddr);
118}
119
120/**
121 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
122 * @vma: The VMA the page is to be allocated for
123 * @vaddr: The virtual address the page will be inserted into
124 *
125 * This function will allocate a page for a VMA that the caller knows will
126 * be able to migrate in the future using move_pages() or reclaimed
127 */
128static inline struct page *
129alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
130 unsigned long vaddr)
131{
132 return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
133}
134
88static inline void clear_highpage(struct page *page) 135static inline void clear_highpage(struct page *page)
89{ 136{
90 void *kaddr = kmap_atomic(page, KM_USER0); 137 void *kaddr = kmap_atomic(page, KM_USER0);