Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/gfp.h      | 16
-rw-r--r--  include/linux/highmem.h  | 51
2 files changed, 64 insertions, 3 deletions
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 0d2ef0b082a..e5882fe49f8 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -30,6 +30,9 @@ struct vm_area_struct;
  * cannot handle allocation failures.
  *
  * __GFP_NORETRY: The VM implementation must not retry indefinitely.
+ *
+ * __GFP_MOVABLE: Flag that this page will be movable by the page migration
+ * mechanism or reclaimed
  */
 #define __GFP_WAIT	((__force gfp_t)0x10u)	/* Can wait and reschedule? */
 #define __GFP_HIGH	((__force gfp_t)0x20u)	/* Should access emergency pools? */
@@ -45,6 +48,7 @@ struct vm_area_struct;
 #define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */
 #define __GFP_HARDWALL   ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */
 #define __GFP_THISNODE	((__force gfp_t)0x40000u) /* No fallback, no policies */
+#define __GFP_MOVABLE	((__force gfp_t)0x80000u) /* Page is movable */
 
 #define __GFP_BITS_SHIFT 20	/* Room for 20 __GFP_FOO bits */
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
@@ -53,7 +57,8 @@ struct vm_area_struct;
 #define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \
 			__GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \
 			__GFP_NOFAIL|__GFP_NORETRY|__GFP_COMP| \
-			__GFP_NOMEMALLOC|__GFP_HARDWALL|__GFP_THISNODE)
+			__GFP_NOMEMALLOC|__GFP_HARDWALL|__GFP_THISNODE| \
+			__GFP_MOVABLE)
 
 /* This equals 0, but use constants in case they ever change */
 #define GFP_NOWAIT	(GFP_ATOMIC & ~__GFP_HIGH)
@@ -65,6 +70,15 @@ struct vm_area_struct;
 #define GFP_USER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
 #define GFP_HIGHUSER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
 			 __GFP_HIGHMEM)
+#define GFP_HIGHUSER_MOVABLE	(__GFP_WAIT | __GFP_IO | __GFP_FS | \
+				 __GFP_HARDWALL | __GFP_HIGHMEM | \
+				 __GFP_MOVABLE)
+#define GFP_NOFS_PAGECACHE	(__GFP_WAIT | __GFP_IO | __GFP_MOVABLE)
+#define GFP_USER_PAGECACHE	(__GFP_WAIT | __GFP_IO | __GFP_FS | \
+				 __GFP_HARDWALL | __GFP_MOVABLE)
+#define GFP_HIGHUSER_PAGECACHE	(__GFP_WAIT | __GFP_IO | __GFP_FS | \
+				 __GFP_HARDWALL | __GFP_HIGHMEM | \
+				 __GFP_MOVABLE)
 
 #ifdef CONFIG_NUMA
 #define GFP_THISNODE	(__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
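
Not part of the patch itself: a minimal caller sketch, using a made-up function name example_alloc_movable_page(), showing how an allocation site that can tolerate its page being migrated or reclaimed later would pick the new GFP_HIGHUSER_MOVABLE combination instead of plain GFP_HIGHUSER.

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical caller (illustration only): the page is flagged with
 * __GFP_MOVABLE via GFP_HIGHUSER_MOVABLE, so the VM may migrate or
 * reclaim it later. */
static struct page *example_alloc_movable_page(void)
{
	return alloc_page(GFP_HIGHUSER_MOVABLE);
}

Allocations that must stay pinned in place keep using GFP_HIGHUSER, which leaves __GFP_MOVABLE clear.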
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 98e2cce996a..12c5e4e3135 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -73,10 +73,27 @@ static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
 }
 
 #ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+/**
+ * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
+ * @movableflags: The GFP flags related to the pages future ability to move like __GFP_MOVABLE
+ * @vma: The VMA the page is to be allocated for
+ * @vaddr: The virtual address the page will be inserted into
+ *
+ * This function will allocate a page for a VMA but the caller is expected
+ * to specify via movableflags whether the page will be movable in the
+ * future or not
+ *
+ * An architecture may override this function by defining
+ * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing their own
+ * implementation.
+ */
 static inline struct page *
-alloc_zeroed_user_highpage(struct vm_area_struct *vma, unsigned long vaddr)
+__alloc_zeroed_user_highpage(gfp_t movableflags,
+			struct vm_area_struct *vma,
+			unsigned long vaddr)
 {
-	struct page *page = alloc_page_vma(GFP_HIGHUSER, vma, vaddr);
+	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
+			vma, vaddr);
 
 	if (page)
 		clear_user_highpage(page, vaddr);
@@ -85,6 +102,36 @@ alloc_zeroed_user_highpage(struct vm_area_struct *vma, unsigned long vaddr)
 }
 #endif
 
+/**
+ * alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA
+ * @vma: The VMA the page is to be allocated for
+ * @vaddr: The virtual address the page will be inserted into
+ *
+ * This function will allocate a page for a VMA that the caller knows will
+ * not be able to move in the future using move_pages() or reclaim. If it
+ * is known that the page can move, use alloc_zeroed_user_highpage_movable
+ */
+static inline struct page *
+alloc_zeroed_user_highpage(struct vm_area_struct *vma, unsigned long vaddr)
+{
+	return __alloc_zeroed_user_highpage(0, vma, vaddr);
+}
+
+/**
+ * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
+ * @vma: The VMA the page is to be allocated for
+ * @vaddr: The virtual address the page will be inserted into
+ *
+ * This function will allocate a page for a VMA that the caller knows will
+ * be able to migrate in the future using move_pages() or reclaimed
+ */
+static inline struct page *
+alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
+					unsigned long vaddr)
+{
+	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
+}
+
 static inline void clear_highpage(struct page *page)
 {
 	void *kaddr = kmap_atomic(page, KM_USER0);
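
Not part of the diff above: a minimal sketch, using a made-up fault-path helper example_anon_fault_page(), of how a caller that knows the page can later be migrated would use the new _movable variant, while callers that need the page to stay put keep the original entry point.

#include <linux/highmem.h>
#include <linux/mm.h>

/* Hypothetical caller (illustration only): the zeroed highmem page is
 * flagged movable, so page migration or reclaim may relocate it later. */
static struct page *example_anon_fault_page(struct vm_area_struct *vma,
					     unsigned long address)
{
	return alloc_zeroed_user_highpage_movable(vma, address);
}

Existing callers of alloc_zeroed_user_highpage() are unchanged; it now simply forwards a zero movableflags value to __alloc_zeroed_user_highpage().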