author		Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-17 16:15:55 -0500
committer	Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-17 16:15:55 -0500
commit		8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree		a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /include/linux/mmzone.h
parent		406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'include/linux/mmzone.h')
-rw-r--r--	include/linux/mmzone.h | 211
1 files changed, 51 insertions, 160 deletions
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 73b64a38b98..be1ac8d7789 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -30,42 +30,18 @@
 /*
  * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
  * costly to service. That is between allocation orders which should
- * coalesce naturally under reasonable reclaim pressure and those which
+ * coelesce naturally under reasonable reclaim pressure and those which
  * will not.
  */
 #define PAGE_ALLOC_COSTLY_ORDER 3
 
-enum {
-	MIGRATE_UNMOVABLE,
-	MIGRATE_RECLAIMABLE,
-	MIGRATE_MOVABLE,
-	MIGRATE_PCPTYPES, /* the number of types on the pcp lists */
-	MIGRATE_RESERVE = MIGRATE_PCPTYPES,
-#ifdef CONFIG_CMA
-	/*
-	 * MIGRATE_CMA migration type is designed to mimic the way
-	 * ZONE_MOVABLE works. Only movable pages can be allocated
-	 * from MIGRATE_CMA pageblocks and page allocator never
-	 * implicitly change migration type of MIGRATE_CMA pageblock.
-	 *
-	 * The way to use it is to change migratetype of a range of
-	 * pageblocks to MIGRATE_CMA which can be done by
-	 * __free_pageblock_cma() function. What is important though
-	 * is that a range of pageblocks must be aligned to
-	 * MAX_ORDER_NR_PAGES should biggest page be bigger then
-	 * a single pageblock.
-	 */
-	MIGRATE_CMA,
-#endif
-	MIGRATE_ISOLATE, /* can't allocate from here */
-	MIGRATE_TYPES
-};
-
-#ifdef CONFIG_CMA
-# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
-#else
-# define is_migrate_cma(migratetype) false
-#endif
+#define MIGRATE_UNMOVABLE 0
+#define MIGRATE_RECLAIMABLE 1
+#define MIGRATE_MOVABLE 2
+#define MIGRATE_PCPTYPES 3 /* the number of types on the pcp lists */
+#define MIGRATE_RESERVE 3
+#define MIGRATE_ISOLATE 4 /* can't allocate from here */
+#define MIGRATE_TYPES 5
 
 #define for_each_migratetype_order(order, type) \
 	for (order = 0; order < MAX_ORDER; order++) \
@@ -124,7 +100,6 @@ enum zone_stat_item {
 	NR_UNSTABLE_NFS, /* NFS unstable pages */
 	NR_BOUNCE,
 	NR_VMSCAN_WRITE,
-	NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */
 	NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */
 	NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */
 	NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */
@@ -140,7 +115,6 @@ enum zone_stat_item {
 	NUMA_OTHER, /* allocation from other node */
 #endif
 	NR_ANON_TRANSPARENT_HUGEPAGES,
-	NR_FREE_CMA_PAGES,
 	NR_VM_ZONE_STAT_ITEMS };
 
 /*
@@ -165,63 +139,31 @@ enum lru_list {
 	NR_LRU_LISTS
 };
 
-#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)
+#define for_each_lru(l) for (l = 0; l < NR_LRU_LISTS; l++)
 
-#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)
+#define for_each_evictable_lru(l) for (l = 0; l <= LRU_ACTIVE_FILE; l++)
 
-static inline int is_file_lru(enum lru_list lru)
+static inline int is_file_lru(enum lru_list l)
 {
-	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
+	return (l == LRU_INACTIVE_FILE || l == LRU_ACTIVE_FILE);
 }
 
-static inline int is_active_lru(enum lru_list lru)
+static inline int is_active_lru(enum lru_list l)
 {
-	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
+	return (l == LRU_ACTIVE_ANON || l == LRU_ACTIVE_FILE);
 }
 
-static inline int is_unevictable_lru(enum lru_list lru)
+static inline int is_unevictable_lru(enum lru_list l)
 {
-	return (lru == LRU_UNEVICTABLE);
+	return (l == LRU_UNEVICTABLE);
 }
 
-struct zone_reclaim_stat {
-	/*
-	 * The pageout code in vmscan.c keeps track of how many of the
-	 * mem/swap backed and file backed pages are referenced.
-	 * The higher the rotated/scanned ratio, the more valuable
-	 * that cache is.
-	 *
-	 * The anon LRU stats live in [0], file LRU stats in [1]
-	 */
-	unsigned long recent_rotated[2];
-	unsigned long recent_scanned[2];
-};
-
-struct lruvec {
-	struct list_head lists[NR_LRU_LISTS];
-	struct zone_reclaim_stat reclaim_stat;
-#ifdef CONFIG_MEMCG
-	struct zone *zone;
-#endif
-};
-
 /* Mask used at gathering information at once (see memcontrol.c) */
 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
+#define LRU_ALL_EVICTABLE (LRU_ALL_FILE | LRU_ALL_ANON)
 #define LRU_ALL ((1 << NR_LRU_LISTS) - 1)
 
-/* Isolate clean file */
-#define ISOLATE_CLEAN ((__force isolate_mode_t)0x1)
-/* Isolate unmapped file */
-#define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x2)
-/* Isolate for asynchronous migration */
-#define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x4)
-/* Isolate unevictable pages */
-#define ISOLATE_UNEVICTABLE ((__force isolate_mode_t)0x8)
-
-/* LRU Isolation modes. */
-typedef unsigned __bitwise__ isolate_mode_t;
-
 enum zone_watermarks {
 	WMARK_MIN,
 	WMARK_LOW,
@@ -326,6 +268,19 @@ enum zone_type {
 #error ZONES_SHIFT -- too many zones configured adjust calculation
 #endif
 
+struct zone_reclaim_stat {
+	/*
+	 * The pageout code in vmscan.c keeps track of how many of the
+	 * mem/swap backed and file backed pages are refeferenced.
+	 * The higher the rotated/scanned ratio, the more valuable
+	 * that cache is.
+	 *
+	 * The anon LRU stats live in [0], file LRU stats in [1]
+	 */
+	unsigned long recent_rotated[2];
+	unsigned long recent_scanned[2];
+};
+
 struct zone {
 	/* Fields commonly accessed by the page allocator */
 
@@ -349,12 +304,6 @@ struct zone {
 	 */
 	unsigned long lowmem_reserve[MAX_NR_ZONES];
 
-	/*
-	 * This is a per-zone reserve of pages that should not be
-	 * considered dirtyable memory.
-	 */
-	unsigned long dirty_balance_reserve;
-
 #ifdef CONFIG_NUMA
 	int node;
 	/*
@@ -369,14 +318,6 @@ struct zone {
 	 */
 	spinlock_t lock;
 	int all_unreclaimable; /* All pages pinned */
-#if defined CONFIG_COMPACTION || defined CONFIG_CMA
-	/* Set to true when the PG_migrate_skip bits should be cleared */
-	bool compact_blockskip_flush;
-
-	/* pfns where compaction scanners should start */
-	unsigned long compact_cached_free_pfn;
-	unsigned long compact_cached_migrate_pfn;
-#endif
 #ifdef CONFIG_MEMORY_HOTPLUG
 	/* see spanned/present_pages for more description */
 	seqlock_t span_seqlock;
@@ -399,14 +340,17 @@ struct zone {
 	 */
 	unsigned int compact_considered;
 	unsigned int compact_defer_shift;
-	int compact_order_failed;
 #endif
 
 	ZONE_PADDING(_pad1_)
 
 	/* Fields commonly accessed by the page reclaim scanner */
 	spinlock_t lru_lock;
-	struct lruvec lruvec;
+	struct zone_lru {
+		struct list_head list;
+	} lru[NR_LRU_LISTS];
+
+	struct zone_reclaim_stat reclaim_stat;
 
 	unsigned long pages_scanned; /* since last reclaim */
 	unsigned long flags; /* zone flags, see below */
@@ -460,44 +404,17 @@ struct zone {
 	unsigned long zone_start_pfn;
 
 	/*
-	 * spanned_pages is the total pages spanned by the zone, including
-	 * holes, which is calculated as:
-	 * 	spanned_pages = zone_end_pfn - zone_start_pfn;
-	 *
-	 * present_pages is physical pages existing within the zone, which
-	 * is calculated as:
-	 *	present_pages = spanned_pages - absent_pages(pags in holes);
-	 *
-	 * managed_pages is present pages managed by the buddy system, which
-	 * is calculated as (reserved_pages includes pages allocated by the
-	 * bootmem allocator):
-	 *	managed_pages = present_pages - reserved_pages;
+	 * zone_start_pfn, spanned_pages and present_pages are all
+	 * protected by span_seqlock. It is a seqlock because it has
+	 * to be read outside of zone->lock, and it is done in the main
+	 * allocator path. But, it is written quite infrequently.
 	 *
-	 * So present_pages may be used by memory hotplug or memory power
-	 * management logic to figure out unmanaged pages by checking
-	 * (present_pages - managed_pages). And managed_pages should be used
-	 * by page allocator and vm scanner to calculate all kinds of watermarks
-	 * and thresholds.
-	 *
-	 * Locking rules:
-	 *
-	 * zone_start_pfn and spanned_pages are protected by span_seqlock.
-	 * It is a seqlock because it has to be read outside of zone->lock,
-	 * and it is done in the main allocator path. But, it is written
-	 * quite infrequently.
-	 *
-	 * The span_seq lock is declared along with zone->lock because it is
+	 * The lock is declared along with zone->lock because it is
 	 * frequently read in proximity to zone->lock. It's good to
 	 * give them a chance of being in the same cacheline.
-	 *
-	 * Write access to present_pages and managed_pages at runtime should
-	 * be protected by lock_memory_hotplug()/unlock_memory_hotplug().
-	 * Any reader who can't tolerant drift of present_pages and
-	 * managed_pages should hold memory hotplug lock to get a stable value.
 	 */
-	unsigned long spanned_pages;
-	unsigned long present_pages;
-	unsigned long managed_pages;
+	unsigned long spanned_pages; /* total size, including holes */
+	unsigned long present_pages; /* amount of memory (excluding holes) */
 
 	/*
 	 * rarely used fields:
@@ -668,13 +585,13 @@ struct zonelist {
 #endif
 };
 
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
 struct node_active_region {
 	unsigned long start_pfn;
 	unsigned long end_pfn;
 	int nid;
 };
-#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
 
 #ifndef CONFIG_DISCONTIGMEM
 /* The array of struct pages - for discontigmem use pgdat->lmem_map */
@@ -699,7 +616,7 @@ typedef struct pglist_data {
 	int nr_zones;
 #ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */
 	struct page *node_mem_map;
-#ifdef CONFIG_MEMCG
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
 	struct page_cgroup *node_page_cgroup;
 #endif
 #endif
@@ -721,25 +638,10 @@ typedef struct pglist_data {
 	unsigned long node_spanned_pages; /* total size of physical page
 					     range, including holes */
 	int node_id;
-	nodemask_t reclaim_nodes; /* Nodes allowed to reclaim from */
 	wait_queue_head_t kswapd_wait;
-	wait_queue_head_t pfmemalloc_wait;
-	struct task_struct *kswapd; /* Protected by lock_memory_hotplug() */
+	struct task_struct *kswapd;
 	int kswapd_max_order;
 	enum zone_type classzone_idx;
-#ifdef CONFIG_NUMA_BALANCING
-	/*
-	 * Lock serializing the per destination node AutoNUMA memory
-	 * migration rate limiting data.
-	 */
-	spinlock_t numabalancing_migrate_lock;
-
-	/* Rate limiting time interval */
-	unsigned long numabalancing_migrate_next_window;
-
-	/* Number of pages migrated during the rate limiting time interval */
-	unsigned long numabalancing_migrate_nr_pages;
-#endif
 } pg_data_t;
 
 #define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
@@ -761,7 +663,7 @@ typedef struct pglist_data {
 #include <linux/memory_hotplug.h>
 
 extern struct mutex zonelists_mutex;
-void build_all_zonelists(pg_data_t *pgdat, struct zone *zone);
+void build_all_zonelists(void *data);
 void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
 bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 		int classzone_idx, int alloc_flags);
@@ -775,17 +677,6 @@ extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
 				     unsigned long size,
 				     enum memmap_context context);
 
-extern void lruvec_init(struct lruvec *lruvec);
-
-static inline struct zone *lruvec_zone(struct lruvec *lruvec)
-{
-#ifdef CONFIG_MEMCG
-	return lruvec->zone;
-#else
-	return container_of(lruvec, struct zone, lruvec);
-#endif
-}
-
 #ifdef CONFIG_HAVE_MEMORY_PRESENT
 void memory_present(int nid, unsigned long start, unsigned long end);
 #else
@@ -816,7 +707,7 @@ extern int movable_zone;
 
 static inline int zone_movable_is_highmem(void)
 {
-#if defined(CONFIG_HIGHMEM) && defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
+#if defined(CONFIG_HIGHMEM) && defined(CONFIG_ARCH_POPULATES_NODE_MAP)
 	return movable_zone == ZONE_HIGHMEM;
 #else
 	return 0;
@@ -1034,7 +925,7 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
 #endif
 
 #if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
-	!defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
+	!defined(CONFIG_ARCH_POPULATES_NODE_MAP)
 static inline unsigned long early_pfn_to_nid(unsigned long pfn)
 {
 	return 0;
@@ -1095,7 +986,7 @@ struct mem_section {
 
 	/* See declaration of similar field in struct zone */
 	unsigned long *pageblock_flags;
-#ifdef CONFIG_MEMCG
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
 	/*
 	 * If !SPARSEMEM, pgdat doesn't have page_cgroup pointer. We use
 	 * section. (see memcontrol.h/page_cgroup.h about this.)
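
Side note on the first hunk: the patched tree returns to plain #define MIGRATE_* constants (MIGRATE_TYPES == 5) while keeping the for_each_migratetype_order() helper. The sketch below is purely illustrative and not part of the commit; it assumes the post-patch constants and the zone->free_area[order].free_list[migratetype] layout of this kernel generation, and the function name count_free_blocks() is hypothetical.

```c
/*
 * Illustrative sketch only -- not part of the patch above. Assumes the
 * post-patch MIGRATE_* #defines and the usual
 * zone->free_area[order].free_list[migratetype] layout of this era;
 * the function name is hypothetical.
 */
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/mm_types.h>

static unsigned long count_free_blocks(struct zone *zone)
{
	unsigned int order, type;
	unsigned long blocks = 0;
	struct page *page;

	/*
	 * Walk every (order, migratetype) free list in the zone;
	 * a real walker would hold zone->lock while doing this.
	 */
	for_each_migratetype_order(order, type)
		list_for_each_entry(page,
				    &zone->free_area[order].free_list[type],
				    lru)
			blocks++;

	return blocks;
}
```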