Diffstat (limited to 'include/linux/mmzone.h')
 include/linux/mmzone.h | 47 ++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 40 insertions(+), 7 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 41aa49b74821..4871e31ae277 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -35,13 +35,39 @@
  */
 #define PAGE_ALLOC_COSTLY_ORDER 3
 
-#define MIGRATE_UNMOVABLE     0
-#define MIGRATE_RECLAIMABLE   1
-#define MIGRATE_MOVABLE       2
-#define MIGRATE_PCPTYPES      3 /* the number of types on the pcp lists */
-#define MIGRATE_RESERVE       3
-#define MIGRATE_ISOLATE       4 /* can't allocate from here */
-#define MIGRATE_TYPES         5
+enum {
+	MIGRATE_UNMOVABLE,
+	MIGRATE_RECLAIMABLE,
+	MIGRATE_MOVABLE,
+	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
+	MIGRATE_RESERVE = MIGRATE_PCPTYPES,
+#ifdef CONFIG_CMA
+	/*
+	 * MIGRATE_CMA migration type is designed to mimic the way
+	 * ZONE_MOVABLE works.  Only movable pages can be allocated
+	 * from MIGRATE_CMA pageblocks and the page allocator never
+	 * implicitly changes the migration type of a MIGRATE_CMA pageblock.
+	 *
+	 * The way to use it is to change the migratetype of a range of
+	 * pageblocks to MIGRATE_CMA, which can be done by the
+	 * __free_pageblock_cma() function.  What is important though
+	 * is that the range of pageblocks must be aligned to
+	 * MAX_ORDER_NR_PAGES should the biggest page be bigger than
+	 * a single pageblock.
+	 */
+	MIGRATE_CMA,
+#endif
+	MIGRATE_ISOLATE,	/* can't allocate from here */
+	MIGRATE_TYPES
+};
+
+#ifdef CONFIG_CMA
+# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
+# define cma_wmark_pages(zone)	zone->min_cma_pages
+#else
+# define is_migrate_cma(migratetype) false
+# define cma_wmark_pages(zone) 0
+#endif
 
 #define for_each_migratetype_order(order, type) \
 	for (order = 0; order < MAX_ORDER; order++) \
@@ -347,6 +373,13 @@ struct zone {
 	/* see spanned/present_pages for more description */
 	seqlock_t span_seqlock;
 #endif
+#ifdef CONFIG_CMA
+	/*
+	 * CMA needs to increase watermark levels during the allocation
+	 * process to make sure that the system is not starved.
+	 */
+	unsigned long min_cma_pages;
+#endif
 	struct free_area free_area[MAX_ORDER];
 
 #ifndef CONFIG_SPARSEMEM
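
The first hunk replaces the old #define block with an enum, so MIGRATE_RESERVE keeps the value of MIGRATE_PCPTYPES and MIGRATE_CMA, when CONFIG_CMA is set, slots in between the per-cpu page list types and MIGRATE_ISOLATE. The standalone sketch below mirrors that layout outside the kernel, purely to show the resulting values and what is_migrate_cma() selects; CONFIG_CMA is assumed enabled and unlikely() is stubbed out because it is a kernel annotation, so this is an illustration rather than kernel code.

/*
 * Userspace sketch mirroring the new migratetype enum, assuming CONFIG_CMA.
 * unlikely() is a kernel branch-prediction hint, stubbed to a no-op here.
 */
#include <stdio.h>

#define unlikely(x) (x)

enum {
	MIGRATE_UNMOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_MOVABLE,
	MIGRATE_PCPTYPES,		/* the number of types on the pcp lists */
	MIGRATE_RESERVE = MIGRATE_PCPTYPES,
	MIGRATE_CMA,			/* present only when CONFIG_CMA is set */
	MIGRATE_ISOLATE,		/* can't allocate from here */
	MIGRATE_TYPES
};

#define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)

int main(void)
{
	int mt;

	printf("MIGRATE_PCPTYPES = %d, MIGRATE_TYPES = %d\n",
	       MIGRATE_PCPTYPES, MIGRATE_TYPES);

	/* Only the MIGRATE_CMA value satisfies is_migrate_cma(). */
	for (mt = 0; mt < MIGRATE_TYPES; mt++)
		printf("type %d: is_migrate_cma = %d\n", mt, is_migrate_cma(mt));

	return 0;
}

Built with a plain C compiler, this reports MIGRATE_PCPTYPES = 3 and MIGRATE_TYPES = 6, with is_migrate_cma() true only for the MIGRATE_CMA value (4).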
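
The second hunk only declares min_cma_pages; going by its comment, the field holds extra watermark headroom that is raised while a CMA allocation is in progress, with cma_wmark_pages() from the first hunk as the accessor. Below is a rough, self-contained sketch of how such headroom could be folded into a minimum-watermark check. struct zone_sketch and watermark_ok() are hypothetical stand-ins for illustration, not the kernel's struct zone or zone_watermark_ok().

/*
 * Standalone sketch (not kernel code) of feeding the extra CMA headroom
 * into a minimum-watermark check.  The struct is a hypothetical stand-in
 * for the relevant fields of struct zone; cma_wmark_pages() follows the
 * macro added in the first hunk.
 */
#include <stdbool.h>
#include <stdio.h>

struct zone_sketch {
	unsigned long free_pages;
	unsigned long watermark_min;
	unsigned long min_cma_pages;	/* headroom reserved while CMA allocates */
};

#define cma_wmark_pages(zone) ((zone)->min_cma_pages)

/* An allocation passes only if free pages cover the raised minimum. */
static bool watermark_ok(const struct zone_sketch *zone, unsigned long nr_pages)
{
	unsigned long min = zone->watermark_min + cma_wmark_pages(zone);

	return zone->free_pages >= min + nr_pages;
}

int main(void)
{
	struct zone_sketch z = {
		.free_pages = 1024,
		.watermark_min = 256,
		.min_cma_pages = 512,	/* raised while a CMA allocation is in flight */
	};

	printf("order-0 alloc ok: %d\n", watermark_ok(&z, 1));
	printf("512-page alloc ok: %d\n", watermark_ok(&z, 512));
	return 0;
}

With the example numbers, an order-0 request still clears the raised minimum while a 512-page request does not, which is the starvation protection the comment describes.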