diff options
Diffstat (limited to 'include/linux/mmzone.h')
-rw-r--r-- | include/linux/mmzone.h | 58 |
1 file changed, 33 insertions, 25 deletions
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 73b64a38b984..ede274957e05 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -15,7 +15,7 @@ | |||
15 | #include <linux/seqlock.h> | 15 | #include <linux/seqlock.h> |
16 | #include <linux/nodemask.h> | 16 | #include <linux/nodemask.h> |
17 | #include <linux/pageblock-flags.h> | 17 | #include <linux/pageblock-flags.h> |
18 | #include <generated/bounds.h> | 18 | #include <linux/page-flags-layout.h> |
19 | #include <linux/atomic.h> | 19 | #include <linux/atomic.h> |
20 | #include <asm/page.h> | 20 | #include <asm/page.h> |
21 | 21 | ||
@@ -57,7 +57,9 @@ enum { | |||
57 | */ | 57 | */ |
58 | MIGRATE_CMA, | 58 | MIGRATE_CMA, |
59 | #endif | 59 | #endif |
60 | #ifdef CONFIG_MEMORY_ISOLATION | ||
60 | MIGRATE_ISOLATE, /* can't allocate from here */ | 61 | MIGRATE_ISOLATE, /* can't allocate from here */ |
62 | #endif | ||
61 | MIGRATE_TYPES | 63 | MIGRATE_TYPES |
62 | }; | 64 | }; |
63 | 65 | ||
@@ -308,24 +310,6 @@ enum zone_type { | |||
308 | 310 | ||
309 | #ifndef __GENERATING_BOUNDS_H | 311 | #ifndef __GENERATING_BOUNDS_H |
310 | 312 | ||
311 | /* | ||
312 | * When a memory allocation must conform to specific limitations (such | ||
313 | * as being suitable for DMA) the caller will pass in hints to the | ||
314 | * allocator in the gfp_mask, in the zone modifier bits. These bits | ||
315 | * are used to select a priority ordered list of memory zones which | ||
316 | * match the requested limits. See gfp_zone() in include/linux/gfp.h | ||
317 | */ | ||
318 | |||
319 | #if MAX_NR_ZONES < 2 | ||
320 | #define ZONES_SHIFT 0 | ||
321 | #elif MAX_NR_ZONES <= 2 | ||
322 | #define ZONES_SHIFT 1 | ||
323 | #elif MAX_NR_ZONES <= 4 | ||
324 | #define ZONES_SHIFT 2 | ||
325 | #else | ||
326 | #error ZONES_SHIFT -- too many zones configured adjust calculation | ||
327 | #endif | ||
328 | |||
329 | struct zone { | 313 | struct zone { |
330 | /* Fields commonly accessed by the page allocator */ | 314 | /* Fields commonly accessed by the page allocator */ |
331 | 315 | ||
@@ -543,6 +527,26 @@ static inline int zone_is_oom_locked(const struct zone *zone) | |||
543 | return test_bit(ZONE_OOM_LOCKED, &zone->flags); | 527 | return test_bit(ZONE_OOM_LOCKED, &zone->flags); |
544 | } | 528 | } |
545 | 529 | ||
530 | static inline unsigned zone_end_pfn(const struct zone *zone) | ||
531 | { | ||
532 | return zone->zone_start_pfn + zone->spanned_pages; | ||
533 | } | ||
534 | |||
535 | static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn) | ||
536 | { | ||
537 | return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone); | ||
538 | } | ||
539 | |||
540 | static inline bool zone_is_initialized(struct zone *zone) | ||
541 | { | ||
542 | return !!zone->wait_table; | ||
543 | } | ||
544 | |||
545 | static inline bool zone_is_empty(struct zone *zone) | ||
546 | { | ||
547 | return zone->spanned_pages == 0; | ||
548 | } | ||
549 | |||
546 | /* | 550 | /* |
547 | * The "priority" of VM scanning is how much of the queues we will scan in one | 551 | * The "priority" of VM scanning is how much of the queues we will scan in one |
548 | * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the | 552 | * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the |
@@ -752,11 +756,17 @@ typedef struct pglist_data { | |||
752 | #define nid_page_nr(nid, pagenr) pgdat_page_nr(NODE_DATA(nid),(pagenr)) | 756 | #define nid_page_nr(nid, pagenr) pgdat_page_nr(NODE_DATA(nid),(pagenr)) |
753 | 757 | ||
754 | #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) | 758 | #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) |
759 | #define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid)) | ||
755 | 760 | ||
756 | #define node_end_pfn(nid) ({\ | 761 | static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat) |
757 | pg_data_t *__pgdat = NODE_DATA(nid);\ | 762 | { |
758 | __pgdat->node_start_pfn + __pgdat->node_spanned_pages;\ | 763 | return pgdat->node_start_pfn + pgdat->node_spanned_pages; |
759 | }) | 764 | } |
765 | |||
766 | static inline bool pgdat_is_empty(pg_data_t *pgdat) | ||
767 | { | ||
768 | return !pgdat->node_start_pfn && !pgdat->node_spanned_pages; | ||
769 | } | ||
760 | 770 | ||
761 | #include <linux/memory_hotplug.h> | 771 | #include <linux/memory_hotplug.h> |
762 | 772 | ||
@@ -1053,8 +1063,6 @@ static inline unsigned long early_pfn_to_nid(unsigned long pfn) | |||
1053 | * PA_SECTION_SHIFT physical address to/from section number | 1063 | * PA_SECTION_SHIFT physical address to/from section number |
1054 | * PFN_SECTION_SHIFT pfn to/from section number | 1064 | * PFN_SECTION_SHIFT pfn to/from section number |
1055 | */ | 1065 | */ |
1056 | #define SECTIONS_SHIFT (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS) | ||
1057 | |||
1058 | #define PA_SECTION_SHIFT (SECTION_SIZE_BITS) | 1066 | #define PA_SECTION_SHIFT (SECTION_SIZE_BITS) |
1059 | #define PFN_SECTION_SHIFT (SECTION_SIZE_BITS - PAGE_SHIFT) | 1067 | #define PFN_SECTION_SHIFT (SECTION_SIZE_BITS - PAGE_SHIFT) |
1060 | 1068 | ||