author     Dan Williams <dan.j.williams@intel.com>   2009-09-08 20:55:21 -0400
committer  Dan Williams <dan.j.williams@intel.com>   2009-09-08 20:55:21 -0400
commit     bbb20089a3275a19e475dbc21320c3742e3ca423 (patch)
tree       216fdc1cbef450ca688135c5b8969169482d9a48 /include/linux/gfp.h
parent     3e48e656903e9fd8bc805c6a2c4264d7808d315b (diff)
parent     657a77fa7284d8ae28dfa48f1dc5d919bf5b2843 (diff)
Merge branch 'dmaengine' into async-tx-next
Conflicts:
crypto/async_tx/async_xor.c
drivers/dma/ioat/dma_v2.h
drivers/dma/ioat/pci.c
drivers/md/raid5.c
Diffstat (limited to 'include/linux/gfp.h')

 -rw-r--r--  include/linux/gfp.h | 174
 1 file changed, 142 insertions(+), 32 deletions(-)
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 0bbc15f54536..7c777a0da17a 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -5,6 +5,7 @@
 #include <linux/stddef.h>
 #include <linux/linkage.h>
 #include <linux/topology.h>
+#include <linux/mmdebug.h>
 
 struct vm_area_struct;
 
@@ -20,7 +21,8 @@ struct vm_area_struct;
 #define __GFP_DMA	((__force gfp_t)0x01u)
 #define __GFP_HIGHMEM	((__force gfp_t)0x02u)
 #define __GFP_DMA32	((__force gfp_t)0x04u)
-
+#define __GFP_MOVABLE	((__force gfp_t)0x08u)  /* Page is movable */
+#define GFP_ZONEMASK	(__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
 /*
  * Action modifiers - doesn't change the zoning
  *
@@ -50,9 +52,20 @@ struct vm_area_struct;
 #define __GFP_HARDWALL   ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */
 #define __GFP_THISNODE	((__force gfp_t)0x40000u) /* No fallback, no policies */
 #define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */
-#define __GFP_MOVABLE	((__force gfp_t)0x100000u)  /* Page is movable */
 
-#define __GFP_BITS_SHIFT 21	/* Room for 21 __GFP_FOO bits */
+#ifdef CONFIG_KMEMCHECK
+#define __GFP_NOTRACK	((__force gfp_t)0x200000u)  /* Don't track with kmemcheck */
+#else
+#define __GFP_NOTRACK	((__force gfp_t)0)
+#endif
+
+/*
+ * This may seem redundant, but it's a way of annotating false positives vs.
+ * allocations that simply cannot be supported (e.g. page tables).
+ */
+#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
+
+#define __GFP_BITS_SHIFT 22	/* Room for 22 __GFP_FOO bits */
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /* This equals 0, but use constants in case they ever change */
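The CONFIG_KMEMCHECK block above uses a common kernel idiom: when the feature is compiled out, the flag is defined as 0, so call sites can OR in __GFP_NOTRACK unconditionally and the OR becomes a no-op. A minimal standalone sketch of the idiom (the GFP_KERNEL value here is illustrative, not the kernel's):

    /* Build with or without -DCONFIG_KMEMCHECK; the assert holds either way. */
    #include <assert.h>

    typedef unsigned gfp_t;

    #ifdef CONFIG_KMEMCHECK
    #define __GFP_NOTRACK ((gfp_t)0x200000u)  /* real flag when the feature is on */
    #else
    #define __GFP_NOTRACK ((gfp_t)0)          /* compiles away when it is off */
    #endif

    #define GFP_KERNEL ((gfp_t)0xd0u)         /* illustrative value only */

    int main(void)
    {
        gfp_t flags = GFP_KERNEL | __GFP_NOTRACK;   /* unconditional OR */
        /* Whatever survives beyond GFP_KERNEL is exactly the optional flag. */
        assert((flags & ~GFP_KERNEL) == __GFP_NOTRACK);
        return 0;
    }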
@@ -85,6 +98,9 @@ struct vm_area_struct;
 			__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
 			__GFP_NORETRY|__GFP_NOMEMALLOC)
 
+/* Control slab gfp mask during early boot */
+#define GFP_BOOT_MASK __GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS)
+
 /* Control allocation constraints */
 #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
 
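A side note on GFP_BOOT_MASK: its expansion is not wrapped in parentheses, so its meaning can change depending on the operators at a use site. The toy masks below (values made up for the demonstration) show one expression where an unparenthesized mask macro and its parenthesized twin diverge:

    #include <assert.h>

    #define BITS_MASK 0xffu
    #define BAD_MASK  BITS_MASK & ~0x10u      /* unparenthesized, like the patch */
    #define GOOD_MASK (BITS_MASK & ~0x10u)

    int main(void)
    {
        /* "~BAD_MASK" expands textually to "~0xffu & ~0x10u", which is
         * 0xffffff00 with 32-bit unsigned int -- not ~(0xffu & ~0x10u). */
        assert((~BAD_MASK) == (~0xffu & ~0x10u));
        assert((~GOOD_MASK) != (~BAD_MASK));
        return 0;
    }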
@@ -112,24 +128,105 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags)
 	((gfp_flags & __GFP_RECLAIMABLE) != 0);
 }
 
-static inline enum zone_type gfp_zone(gfp_t flags)
-{
+#ifdef CONFIG_HIGHMEM
+#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
+#else
+#define OPT_ZONE_HIGHMEM ZONE_NORMAL
+#endif
+
 #ifdef CONFIG_ZONE_DMA
-	if (flags & __GFP_DMA)
-		return ZONE_DMA;
+#define OPT_ZONE_DMA ZONE_DMA
+#else
+#define OPT_ZONE_DMA ZONE_NORMAL
 #endif
+
 #ifdef CONFIG_ZONE_DMA32
-	if (flags & __GFP_DMA32)
-		return ZONE_DMA32;
+#define OPT_ZONE_DMA32 ZONE_DMA32
+#else
+#define OPT_ZONE_DMA32 ZONE_NORMAL
 #endif
-	if ((flags & (__GFP_HIGHMEM | __GFP_MOVABLE)) ==
-			(__GFP_HIGHMEM | __GFP_MOVABLE))
-		return ZONE_MOVABLE;
-#ifdef CONFIG_HIGHMEM
-	if (flags & __GFP_HIGHMEM)
-		return ZONE_HIGHMEM;
+
+/*
+ * GFP_ZONE_TABLE is a word-size bitstring that is used for looking up the
+ * zone to use given the lowest 4 bits of gfp_t. Entries are ZONES_SHIFT
+ * long and there are 16 of them to cover all possible combinations of
+ * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM.
+ *
+ * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
+ * But __GFP_MOVABLE is not only a zone specifier but also an allocation
+ * policy. Therefore __GFP_MOVABLE plus another zone selector is valid.
+ * Only 1 bit of the lowest 3 bits (DMA,DMA32,HIGHMEM) can be set to "1".
+ *
+ *       bit       result
+ *       =================
+ *       0x0    => NORMAL
+ *       0x1    => DMA or NORMAL
+ *       0x2    => HIGHMEM or NORMAL
+ *       0x3    => BAD (DMA+HIGHMEM)
+ *       0x4    => DMA32 or DMA or NORMAL
+ *       0x5    => BAD (DMA+DMA32)
+ *       0x6    => BAD (HIGHMEM+DMA32)
+ *       0x7    => BAD (HIGHMEM+DMA32+DMA)
+ *       0x8    => NORMAL (MOVABLE+0)
+ *       0x9    => DMA or NORMAL (MOVABLE+DMA)
+ *       0xa    => MOVABLE (Movable is valid only if HIGHMEM is set too)
+ *       0xb    => BAD (MOVABLE+HIGHMEM+DMA)
+ *       0xc    => DMA32 (MOVABLE+HIGHMEM+DMA32)
+ *       0xd    => BAD (MOVABLE+DMA32+DMA)
+ *       0xe    => BAD (MOVABLE+DMA32+HIGHMEM)
+ *       0xf    => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
+ *
+ * ZONES_SHIFT must be <= 2 on 32 bit platforms.
+ */
+
+#if 16 * ZONES_SHIFT > BITS_PER_LONG
+#error ZONES_SHIFT too large to create GFP_ZONE_TABLE integer
 #endif
-	return ZONE_NORMAL;
+
+#define GFP_ZONE_TABLE ( \
+	(ZONE_NORMAL << 0 * ZONES_SHIFT)				  \
+	| (OPT_ZONE_DMA << __GFP_DMA * ZONES_SHIFT)			  \
+	| (OPT_ZONE_HIGHMEM << __GFP_HIGHMEM * ZONES_SHIFT)		  \
+	| (OPT_ZONE_DMA32 << __GFP_DMA32 * ZONES_SHIFT)			  \
+	| (ZONE_NORMAL << __GFP_MOVABLE * ZONES_SHIFT)			  \
+	| (OPT_ZONE_DMA << (__GFP_MOVABLE | __GFP_DMA) * ZONES_SHIFT)	  \
+	| (ZONE_MOVABLE << (__GFP_MOVABLE | __GFP_HIGHMEM) * ZONES_SHIFT) \
+	| (OPT_ZONE_DMA32 << (__GFP_MOVABLE | __GFP_DMA32) * ZONES_SHIFT) \
+)
+
+/*
+ * GFP_ZONE_BAD is a bitmap for all combinations of __GFP_DMA, __GFP_DMA32,
+ * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per
+ * entry starting with bit 0. Bit is set if the combination is not
+ * allowed.
+ */
+#define GFP_ZONE_BAD ( \
+	1 << (__GFP_DMA | __GFP_HIGHMEM)				  \
+	| 1 << (__GFP_DMA | __GFP_DMA32)				  \
+	| 1 << (__GFP_DMA32 | __GFP_HIGHMEM)				  \
+	| 1 << (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)		  \
+	| 1 << (__GFP_MOVABLE | __GFP_HIGHMEM | __GFP_DMA)		  \
+	| 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_DMA)		  \
+	| 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_HIGHMEM)		  \
+	| 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_DMA | __GFP_HIGHMEM)  \
+)
+
+static inline enum zone_type gfp_zone(gfp_t flags)
+{
+	enum zone_type z;
+	int bit = flags & GFP_ZONEMASK;
+
+	z = (GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) &
+					((1 << ZONES_SHIFT) - 1);
+
+	if (__builtin_constant_p(bit))
+		BUILD_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
+	else {
+#ifdef CONFIG_DEBUG_VM
+		BUG_ON((GFP_ZONE_BAD >> bit) & 1);
+#endif
+	}
+	return z;
 }
 
 /*
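The replacement gfp_zone() trades the old if-ladder for a constant table packed into one word: entry i, ZONES_SHIFT bits wide, holds the zone for gfp low-bit pattern i, and GFP_ZONE_BAD flags the invalid patterns. A self-contained userspace sketch of the same packing technique, with illustrative zone numbers and a 3-bit entry width rather than the kernel's ZONES_SHIFT:

    /* Standalone sketch of the packed-table lookup; numbering is not the
     * kernel's, only the technique matches the hunk above. */
    #include <assert.h>
    #include <stdio.h>

    enum zone { NORMAL, DMA, DMA32, HIGHMEM, MOVABLE };

    #define SHIFT       3     /* bits per table entry; 16*SHIFT must fit 64 */
    #define DMA_BIT     0x1u
    #define HIGHMEM_BIT 0x2u
    #define DMA32_BIT   0x4u
    #define MOVABLE_BIT 0x8u

    #define ENTRY(zone, idx) ((unsigned long long)(zone) << ((idx) * SHIFT))
    #define TABLE ( \
          ENTRY(NORMAL,  0)                         \
        | ENTRY(DMA,     DMA_BIT)                   \
        | ENTRY(HIGHMEM, HIGHMEM_BIT)               \
        | ENTRY(DMA32,   DMA32_BIT)                 \
        | ENTRY(NORMAL,  MOVABLE_BIT)               \
        | ENTRY(DMA,     MOVABLE_BIT | DMA_BIT)     \
        | ENTRY(MOVABLE, MOVABLE_BIT | HIGHMEM_BIT) \
        | ENTRY(DMA32,   MOVABLE_BIT | DMA32_BIT))

    static enum zone lookup(unsigned flags)
    {
        unsigned bit = flags & 0xfu;   /* the GFP_ZONEMASK analogue */
        return (enum zone)((TABLE >> (bit * SHIFT)) & ((1u << SHIFT) - 1));
    }

    int main(void)
    {
        assert(lookup(0) == NORMAL);
        assert(lookup(DMA_BIT) == DMA);
        /* 0xa: MOVABLE is selected only when HIGHMEM is requested too */
        assert(lookup(MOVABLE_BIT | HIGHMEM_BIT) == MOVABLE);
        printf("lookups match the 0x0-0xf map in the comment\n");
        return 0;
    }

The payoff over the old if-ladder is that for compile-time-constant gfp flags the whole lookup folds to a constant, and invalid flag combinations are rejected by BUILD_BUG_ON at build time rather than falling through silently.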
@@ -169,30 +266,19 @@ static inline void arch_alloc_page(struct page *page, int order) { }
 #endif
 
 struct page *
-__alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
+__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 		       struct zonelist *zonelist, nodemask_t *nodemask);
 
 static inline struct page *
 __alloc_pages(gfp_t gfp_mask, unsigned int order,
 		struct zonelist *zonelist)
 {
-	return __alloc_pages_internal(gfp_mask, order, zonelist, NULL);
+	return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
 }
 
-static inline struct page *
-__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
-		struct zonelist *zonelist, nodemask_t *nodemask)
-{
-	return __alloc_pages_internal(gfp_mask, order, zonelist, nodemask);
-}
-
-
 static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
 						unsigned int order)
 {
-	if (unlikely(order >= MAX_ORDER))
-		return NULL;
-
 	/* Unknown node is current node */
 	if (nid < 0)
 		nid = numa_node_id();
@@ -200,15 +286,20 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
 	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
 }
 
+static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
+						unsigned int order)
+{
+	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
+
+	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
+}
+
 #ifdef CONFIG_NUMA
 extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);
 
 static inline struct page *
 alloc_pages(gfp_t gfp_mask, unsigned int order)
 {
-	if (unlikely(order >= MAX_ORDER))
-		return NULL;
-
 	return alloc_pages_current(gfp_mask, order);
 }
 extern struct page *alloc_page_vma(gfp_t gfp_mask,
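The two node-aware helpers now have deliberately different contracts: alloc_pages_node() keeps the "negative nid means current node" fallback, while the new alloc_pages_exact_node() requires a valid nid and traps via VM_BUG_ON otherwise. (Both inline wrappers also lose their order >= MAX_ORDER early return; presumably that check moves into the core allocator, which is not visible in this hunk.) A userspace mock of just the nid-resolution rules, with numa_node_id() stubbed and assert standing in for VM_BUG_ON:

    #include <assert.h>

    #define MAX_NUMNODES 4

    static int numa_node_id(void) { return 0; }   /* pretend we run on node 0 */

    static int resolve_node(int nid)              /* alloc_pages_node() rule */
    {
        if (nid < 0)                  /* unknown node falls back to current */
            nid = numa_node_id();
        return nid;
    }

    static int resolve_exact_node(int nid)   /* alloc_pages_exact_node() rule */
    {
        assert(nid >= 0 && nid < MAX_NUMNODES);   /* stand-in for VM_BUG_ON */
        return nid;
    }

    int main(void)
    {
        assert(resolve_node(-1) == 0);        /* -1 silently means "here" */
        assert(resolve_exact_node(2) == 2);   /* exact variant demands a real nid */
        return 0;
    }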
@@ -245,4 +336,23 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
 void drain_all_pages(void);
 void drain_local_pages(void *dummy);
 
+extern bool oom_killer_disabled;
+
+static inline void oom_killer_disable(void)
+{
+	oom_killer_disabled = true;
+}
+
+static inline void oom_killer_enable(void)
+{
+	oom_killer_disabled = false;
+}
+
+extern gfp_t gfp_allowed_mask;
+
+static inline void set_gfp_allowed_mask(gfp_t mask)
+{
+	gfp_allowed_mask = mask;
+}
+
 #endif /* __LINUX_GFP_H */
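Taken together with GFP_BOOT_MASK from earlier in the patch, gfp_allowed_mask lets early boot globally forbid the blocking flags. The consumers are not in this header, but the natural use is for each allocator entry point to AND the caller's mask with it. A standalone sketch of that flow, with illustrative flag values (and the boot mask parenthesized here, unlike the patch):

    #include <assert.h>

    typedef unsigned gfp_t;

    #define __GFP_WAIT 0x10u
    #define __GFP_IO   0x40u
    #define __GFP_FS   0x80u
    #define __GFP_BITS_MASK 0x3fffffu
    #define GFP_KERNEL (__GFP_WAIT | __GFP_IO | __GFP_FS)
    #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT | __GFP_IO | __GFP_FS))

    static gfp_t gfp_allowed_mask = __GFP_BITS_MASK;

    static gfp_t alloc_entry(gfp_t gfp_mask)
    {
        gfp_mask &= gfp_allowed_mask;   /* clamp to what is currently legal */
        return gfp_mask;
    }

    int main(void)
    {
        gfp_allowed_mask = GFP_BOOT_MASK;       /* early boot: no blocking */
        assert(alloc_entry(GFP_KERNEL) == 0);   /* WAIT/IO/FS all stripped */
        gfp_allowed_mask = __GFP_BITS_MASK;     /* normal operation restored */
        assert(alloc_entry(GFP_KERNEL) == GFP_KERNEL);
        return 0;
    }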