author    | Linus Torvalds <torvalds@linux-foundation.org> | 2009-06-16 22:50:13 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-06-16 22:50:13 -0400
commit    | 517d08699b250021303f9a7cf0d758b6dc0748ed (patch)
tree      | 5e5b0134c3fffb78fe9d8b1641a64ff28fdd7bbc /include/linux/gfp.h
parent    | 8eeee4e2f04fc551f50c9d9847da2d73d7d33728 (diff)
parent    | a34601c5d84134055782ee031d58d82f5440e918 (diff)
Merge branch 'akpm'
* akpm: (182 commits)
fbdev: bf54x-lq043fb: use kzalloc over kmalloc/memset
fbdev: *bfin*: fix __dev{init,exit} markings
fbdev: *bfin*: drop unnecessary calls to memset
fbdev: bfin-t350mcqb-fb: drop unused local variables
fbdev: blackfin has __raw I/O accessors, so use them in fb.h
fbdev: s1d13xxxfb: add accelerated bitblt functions
tcx: use standard fields for framebuffer physical address and length
fbdev: add support for handoff from firmware to hw framebuffers
intelfb: fix a bug when changing video timing
fbdev: use framebuffer_release() for freeing fb_info structures
radeon: P2G2CLK_ALWAYS_ONb tested twice, should 2nd be P2G2CLK_DAC_ALWAYS_ONb?
s3c-fb: CPUFREQ frequency scaling support
s3c-fb: fix resource releasing on error during probing
carminefb: fix possible access beyond end of carmine_modedb[]
acornfb: remove fb_mmap function
mb862xxfb: use CONFIG_OF instead of CONFIG_PPC_OF
mb862xxfb: restrict compilation of platform driver to PPC
Samsung SoC Framebuffer driver: add Alpha Channel support
atmel-lcdc: fix pixclock upper bound detection
offb: use framebuffer_alloc() to allocate fb_info struct
...
Manually fix up conflicts due to kmemcheck in mm/slab.c
Diffstat (limited to 'include/linux/gfp.h')
-rw-r--r-- | include/linux/gfp.h | 150
1 file changed, 119 insertions(+), 31 deletions(-)
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 80e14b8c2e78..cfdb35d71bca 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -5,6 +5,7 @@
 #include <linux/stddef.h>
 #include <linux/linkage.h>
 #include <linux/topology.h>
+#include <linux/mmdebug.h>
 
 struct vm_area_struct;
 
@@ -20,7 +21,8 @@ struct vm_area_struct;
 #define __GFP_DMA	((__force gfp_t)0x01u)
 #define __GFP_HIGHMEM	((__force gfp_t)0x02u)
 #define __GFP_DMA32	((__force gfp_t)0x04u)
-
+#define __GFP_MOVABLE	((__force gfp_t)0x08u)  /* Page is movable */
+#define GFP_ZONEMASK	(__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
 /*
  * Action modifiers - doesn't change the zoning
  *
@@ -50,7 +52,6 @@ struct vm_area_struct;
 #define __GFP_HARDWALL   ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */
 #define __GFP_THISNODE	((__force gfp_t)0x40000u) /* No fallback, no policies */
 #define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */
-#define __GFP_MOVABLE	((__force gfp_t)0x100000u)  /* Page is movable */
 
 #ifdef CONFIG_KMEMCHECK
 #define __GFP_NOTRACK	((__force gfp_t)0x200000u)  /* Don't track with kmemcheck */
@@ -127,24 +128,105 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags)
 		((gfp_flags & __GFP_RECLAIMABLE) != 0);
 }
 
-static inline enum zone_type gfp_zone(gfp_t flags)
-{
+#ifdef CONFIG_HIGHMEM
+#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
+#else
+#define OPT_ZONE_HIGHMEM ZONE_NORMAL
+#endif
+
 #ifdef CONFIG_ZONE_DMA
-	if (flags & __GFP_DMA)
-		return ZONE_DMA;
+#define OPT_ZONE_DMA ZONE_DMA
+#else
+#define OPT_ZONE_DMA ZONE_NORMAL
 #endif
+
 #ifdef CONFIG_ZONE_DMA32
-	if (flags & __GFP_DMA32)
-		return ZONE_DMA32;
+#define OPT_ZONE_DMA32 ZONE_DMA32
+#else
+#define OPT_ZONE_DMA32 ZONE_NORMAL
 #endif
-	if ((flags & (__GFP_HIGHMEM | __GFP_MOVABLE)) ==
-			(__GFP_HIGHMEM | __GFP_MOVABLE))
-		return ZONE_MOVABLE;
-#ifdef CONFIG_HIGHMEM
-	if (flags & __GFP_HIGHMEM)
-		return ZONE_HIGHMEM;
+
+/*
+ * GFP_ZONE_TABLE is a word size bitstring that is used for looking up the
+ * zone to use given the lowest 4 bits of gfp_t. Entries are ZONE_SHIFT long
+ * and there are 16 of them to cover all possible combinations of
+ * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM
+ *
+ * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
+ * But GFP_MOVABLE is not only a zone specifier but also an allocation
+ * policy. Therefore __GFP_MOVABLE plus another zone selector is valid.
+ * Only 1bit of the lowest 3 bit (DMA,DMA32,HIGHMEM) can be set to "1".
+ *
+ *       bit       result
+ *       =================
+ *       0x0    => NORMAL
+ *       0x1    => DMA or NORMAL
+ *       0x2    => HIGHMEM or NORMAL
+ *       0x3    => BAD (DMA+HIGHMEM)
+ *       0x4    => DMA32 or DMA or NORMAL
+ *       0x5    => BAD (DMA+DMA32)
+ *       0x6    => BAD (HIGHMEM+DMA32)
+ *       0x7    => BAD (HIGHMEM+DMA32+DMA)
+ *       0x8    => NORMAL (MOVABLE+0)
+ *       0x9    => DMA or NORMAL (MOVABLE+DMA)
+ *       0xa    => MOVABLE (Movable is valid only if HIGHMEM is set too)
+ *       0xb    => BAD (MOVABLE+HIGHMEM+DMA)
+ *       0xc    => DMA32 (MOVABLE+HIGHMEM+DMA32)
+ *       0xd    => BAD (MOVABLE+DMA32+DMA)
+ *       0xe    => BAD (MOVABLE+DMA32+HIGHMEM)
+ *       0xf    => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
+ *
+ * ZONES_SHIFT must be <= 2 on 32 bit platforms.
+ */
+
+#if 16 * ZONES_SHIFT > BITS_PER_LONG
+#error ZONES_SHIFT too large to create GFP_ZONE_TABLE integer
+#endif
+
+#define GFP_ZONE_TABLE ( \
+	(ZONE_NORMAL << 0 * ZONES_SHIFT)				\
+	| (OPT_ZONE_DMA << __GFP_DMA * ZONES_SHIFT)			\
+	| (OPT_ZONE_HIGHMEM << __GFP_HIGHMEM * ZONES_SHIFT)		\
+	| (OPT_ZONE_DMA32 << __GFP_DMA32 * ZONES_SHIFT)			\
+	| (ZONE_NORMAL << __GFP_MOVABLE * ZONES_SHIFT)			\
+	| (OPT_ZONE_DMA << (__GFP_MOVABLE | __GFP_DMA) * ZONES_SHIFT)	\
+	| (ZONE_MOVABLE << (__GFP_MOVABLE | __GFP_HIGHMEM) * ZONES_SHIFT)\
+	| (OPT_ZONE_DMA32 << (__GFP_MOVABLE | __GFP_DMA32) * ZONES_SHIFT)\
+)
+
+/*
+ * GFP_ZONE_BAD is a bitmap for all combination of __GFP_DMA, __GFP_DMA32
+ * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per
+ * entry starting with bit 0. Bit is set if the combination is not
+ * allowed.
+ */
+#define GFP_ZONE_BAD ( \
+	1 << (__GFP_DMA | __GFP_HIGHMEM)				\
+	| 1 << (__GFP_DMA | __GFP_DMA32)				\
+	| 1 << (__GFP_DMA32 | __GFP_HIGHMEM)				\
+	| 1 << (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)		\
+	| 1 << (__GFP_MOVABLE | __GFP_HIGHMEM | __GFP_DMA)		\
+	| 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_DMA)		\
+	| 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_HIGHMEM)		\
+	| 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_DMA | __GFP_HIGHMEM)\
+)
+
+static inline enum zone_type gfp_zone(gfp_t flags)
+{
+	enum zone_type z;
+	int bit = flags & GFP_ZONEMASK;
+
+	z = (GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) &
+					 ((1 << ZONES_SHIFT) - 1);
+
+	if (__builtin_constant_p(bit))
+		BUILD_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
+	else {
+#ifdef CONFIG_DEBUG_VM
+		BUG_ON((GFP_ZONE_BAD >> bit) & 1);
 #endif
-	return ZONE_NORMAL;
+	}
+	return z;
 }
 
 /*
@@ -184,30 +266,19 @@ static inline void arch_alloc_page(struct page *page, int order) { }
 #endif
 
 struct page *
-__alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
+__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 		       struct zonelist *zonelist, nodemask_t *nodemask);
 
 static inline struct page *
 __alloc_pages(gfp_t gfp_mask, unsigned int order,
 		struct zonelist *zonelist)
 {
-	return __alloc_pages_internal(gfp_mask, order, zonelist, NULL);
+	return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
 }
 
-static inline struct page *
-__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
-		struct zonelist *zonelist, nodemask_t *nodemask)
-{
-	return __alloc_pages_internal(gfp_mask, order, zonelist, nodemask);
-}
-
-
 static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
 						unsigned int order)
 {
-	if (unlikely(order >= MAX_ORDER))
-		return NULL;
-
 	/* Unknown node is current node */
 	if (nid < 0)
 		nid = numa_node_id();
@@ -215,15 +286,20 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
 	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
 }
 
+static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
+						unsigned int order)
+{
+	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
+
+	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
+}
+
 #ifdef CONFIG_NUMA
 extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);
 
 static inline struct page *
 alloc_pages(gfp_t gfp_mask, unsigned int order)
 {
-	if (unlikely(order >= MAX_ORDER))
-		return NULL;
-
 	return alloc_pages_current(gfp_mask, order);
 }
 extern struct page *alloc_page_vma(gfp_t gfp_mask,
@@ -260,4 +336,16 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
 void drain_all_pages(void);
 void drain_local_pages(void *dummy);
 
+extern bool oom_killer_disabled;
+
+static inline void oom_killer_disable(void)
+{
+	oom_killer_disabled = true;
+}
+
+static inline void oom_killer_enable(void)
+{
+	oom_killer_disabled = false;
+}
+
 #endif /* __LINUX_GFP_H */
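
Note on the new gfp_zone(): it replaces the old chain of #ifdef'd comparisons with a single shift-and-mask into the GFP_ZONE_TABLE constant, and GFP_ZONE_BAD rejects invalid zone-modifier combinations (at compile time via BUILD_BUG_ON when the flags are constant, otherwise via BUG_ON under CONFIG_DEBUG_VM). The user-space sketch below re-creates that lookup so the bit-to-zone table in the comment can be checked by hand. It is only a sketch: the zone numbering, ZONES_SHIFT = 3 and the use of unsigned long long are illustrative assumptions for a configuration with every zone enabled, not the kernel's real definitions, which come from <linux/mmzone.h> and the build configuration.

/*
 * gfp_zone_table_demo.c - user-space sketch of the GFP_ZONE_TABLE lookup.
 *
 * The zone numbering (ZONE_DMA=0 ... ZONE_MOVABLE=4), ZONES_SHIFT=3 and the
 * unsigned long long table width are illustrative assumptions; the kernel's
 * actual values depend on the configuration.
 */
#include <assert.h>
#include <stdio.h>

/* Lowest four gfp_t bits, with the encoding used by the patch. */
#define __GFP_DMA	0x01u
#define __GFP_HIGHMEM	0x02u
#define __GFP_DMA32	0x04u
#define __GFP_MOVABLE	0x08u
#define GFP_ZONEMASK	(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32 | __GFP_MOVABLE)

/* Assumed zone numbering for a configuration with all zones available. */
enum zone_type { ZONE_DMA, ZONE_DMA32, ZONE_NORMAL, ZONE_HIGHMEM, ZONE_MOVABLE };
#define ZONES_SHIFT 3	/* wide enough to hold the five zone numbers above */

/* One ZONES_SHIFT-wide entry per value of (flags & GFP_ZONEMASK). */
#define GFP_ZONE_TABLE (							\
	  ((unsigned long long)ZONE_NORMAL  << 0 * ZONES_SHIFT)		\
	| ((unsigned long long)ZONE_DMA     << __GFP_DMA * ZONES_SHIFT)	\
	| ((unsigned long long)ZONE_HIGHMEM << __GFP_HIGHMEM * ZONES_SHIFT)	\
	| ((unsigned long long)ZONE_DMA32   << __GFP_DMA32 * ZONES_SHIFT)	\
	| ((unsigned long long)ZONE_NORMAL  << __GFP_MOVABLE * ZONES_SHIFT)	\
	| ((unsigned long long)ZONE_DMA     << (__GFP_MOVABLE | __GFP_DMA) * ZONES_SHIFT)	\
	| ((unsigned long long)ZONE_MOVABLE << (__GFP_MOVABLE | __GFP_HIGHMEM) * ZONES_SHIFT)	\
	| ((unsigned long long)ZONE_DMA32   << (__GFP_MOVABLE | __GFP_DMA32) * ZONES_SHIFT))

/* Bit N is set when zone-selector combination N is invalid. */
#define GFP_ZONE_BAD (								\
	  1ull << (__GFP_DMA | __GFP_HIGHMEM)					\
	| 1ull << (__GFP_DMA | __GFP_DMA32)					\
	| 1ull << (__GFP_DMA32 | __GFP_HIGHMEM)					\
	| 1ull << (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)			\
	| 1ull << (__GFP_MOVABLE | __GFP_HIGHMEM | __GFP_DMA)			\
	| 1ull << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_DMA)			\
	| 1ull << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_HIGHMEM)		\
	| 1ull << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_DMA | __GFP_HIGHMEM))

static enum zone_type gfp_zone(unsigned int flags)
{
	unsigned int bit = flags & GFP_ZONEMASK;

	/* Stand-in for the BUILD_BUG_ON()/BUG_ON() check on bad combinations. */
	assert(!((GFP_ZONE_BAD >> bit) & 1));

	return (enum zone_type)((GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) &
				((1 << ZONES_SHIFT) - 1));
}

int main(void)
{
	/* 0x0: no zone modifier falls back to ZONE_NORMAL. */
	printf("0x0             -> %d (expect ZONE_NORMAL  = %d)\n",
	       gfp_zone(0), ZONE_NORMAL);
	/* 0xa: MOVABLE|HIGHMEM is the only combination that yields ZONE_MOVABLE. */
	printf("MOVABLE|HIGHMEM -> %d (expect ZONE_MOVABLE = %d)\n",
	       gfp_zone(__GFP_MOVABLE | __GFP_HIGHMEM), ZONE_MOVABLE);
	/* 0x9: MOVABLE|DMA stays in ZONE_DMA; MOVABLE only adds placement policy. */
	printf("MOVABLE|DMA     -> %d (expect ZONE_DMA     = %d)\n",
	       gfp_zone(__GFP_MOVABLE | __GFP_DMA), ZONE_DMA);
	return 0;
}

Built with a plain cc on a 64-bit host, the sketch prints the expected zones for the 0x0, 0x9 and 0xa rows of the comment table, and the assert() trips for a bad combination such as __GFP_DMA | __GFP_HIGHMEM, mirroring what BUILD_BUG_ON catches at compile time in the header.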