Diffstat (limited to 'include/linux/mmzone.h')

 include/linux/mmzone.h | 40 +++++++++++++++++++++++++++++++++++-----
 1 file changed, 35 insertions(+), 5 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 5c76737d836b..ae19af5ec02c 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -474,10 +474,16 @@ struct zone {
 	 * frequently read in proximity to zone->lock.  It's good to
 	 * give them a chance of being in the same cacheline.
 	 *
-	 * Write access to present_pages and managed_pages at runtime should
-	 * be protected by lock_memory_hotplug()/unlock_memory_hotplug().
-	 * Any reader who can't tolerant drift of present_pages and
-	 * managed_pages should hold memory hotplug lock to get a stable value.
+	 * Write access to present_pages at runtime should be protected by
+	 * lock_memory_hotplug()/unlock_memory_hotplug().  Any reader who can't
+	 * tolerate drift of present_pages should hold the memory hotplug lock
+	 * to get a stable value.
+	 *
+	 * Read access to managed_pages should be safe because it's unsigned
+	 * long. Write access to zone->managed_pages and totalram_pages is
+	 * protected by managed_page_count_lock at runtime. Ideally only
+	 * adjust_managed_page_count() should be used instead of directly
+	 * touching zone->managed_pages and totalram_pages.
 	 */
 	unsigned long		spanned_pages;
 	unsigned long		present_pages;
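
The new comment directs writers at adjust_managed_page_count(). As a rough sketch of the rule it describes (the real helper lives in mm/page_alloc.c and carries extra bookkeeping; the body below is an assumption, not a quote from the patch):

	/* Sketch: all writers of the managed counters serialize on
	 * managed_page_count_lock; lock-free readers of the unsigned
	 * long counters merely tolerate transient drift. */
	static DEFINE_SPINLOCK(managed_page_count_lock);

	void adjust_managed_page_count(struct page *page, long count)
	{
		spin_lock(&managed_page_count_lock);
		page_zone(page)->managed_pages += count;
		totalram_pages += count;
		spin_unlock(&managed_page_count_lock);
	}
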
@@ -495,6 +501,13 @@ typedef enum {
 	ZONE_CONGESTED,			/* zone has many dirty pages backed by
 					 * a congested BDI
 					 */
+	ZONE_TAIL_LRU_DIRTY,		/* reclaim scanning has recently found
+					 * many dirty file pages at the tail
+					 * of the LRU.
+					 */
+	ZONE_WRITEBACK,			/* reclaim scanning has recently found
+					 * many pages under writeback
+					 */
 } zone_flags_t;
 
 static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
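
The new flags are ordinary bits in zone->flags, set and cleared through the helpers visible in this hunk's context. A hedged sketch of how a reclaim path might raise them; the call site and the nr_taken/nr_unqueued_dirty/nr_writeback counters are assumptions modeled on typical shrink_inactive_list() accounting, not part of this hunk:

	/* If every page isolated from the LRU tail in this batch was
	 * dirty or under writeback, record that on the zone so kswapd
	 * can throttle or wake the flusher threads accordingly. */
	if (nr_unqueued_dirty == nr_taken)
		zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY);
	if (nr_writeback == nr_taken)
		zone_set_flag(zone, ZONE_WRITEBACK);
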
@@ -517,6 +530,16 @@ static inline int zone_is_reclaim_congested(const struct zone *zone)
 	return test_bit(ZONE_CONGESTED, &zone->flags);
 }
 
+static inline int zone_is_reclaim_dirty(const struct zone *zone)
+{
+	return test_bit(ZONE_TAIL_LRU_DIRTY, &zone->flags);
+}
+
+static inline int zone_is_reclaim_writeback(const struct zone *zone)
+{
+	return test_bit(ZONE_WRITEBACK, &zone->flags);
+}
+
 static inline int zone_is_reclaim_locked(const struct zone *zone)
 {
 	return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
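
The two new accessors mirror zone_is_reclaim_congested() directly above them. An illustrative consumer follows; the throttling policy shown is an assumption rather than this patch's code, though wait_iff_congested() did exist with this signature in kernels of this era:

	/* Back off briefly instead of scanning harder when the zone
	 * was recently observed to be writeback-bound. */
	if (zone_is_reclaim_writeback(zone))
		wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
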
@@ -716,7 +739,10 @@ typedef struct pglist_data {
 	 * or node_spanned_pages stay constant.  Holding this will also
 	 * guarantee that any pfn_valid() stays that way.
 	 *
-	 * Nests above zone->lock and zone->size_seqlock.
+	 * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
+	 * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG.
+	 *
+	 * Nests above zone->lock and zone->span_seqlock.
 	 */
 	spinlock_t node_size_lock;
 #endif
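
The rewritten comment names pgdat_resize_lock()/pgdat_resize_unlock(), which wrap node_size_lock so callers never test CONFIG_MEMORY_HOTPLUG themselves. A sketch of the usual wrapper pattern (the real definitions live in include/linux/memory_hotplug.h; the empty stubs for the !CONFIG_MEMORY_HOTPLUG build are an assumption):

	#ifdef CONFIG_MEMORY_HOTPLUG
	static inline void pgdat_resize_lock(struct pglist_data *pgdat,
					     unsigned long *flags)
	{
		spin_lock_irqsave(&pgdat->node_size_lock, *flags);
	}
	static inline void pgdat_resize_unlock(struct pglist_data *pgdat,
					       unsigned long *flags)
	{
		spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
	}
	#else
	/* Compile away to nothing when hotplug is disabled. */
	static inline void pgdat_resize_lock(struct pglist_data *p,
					     unsigned long *f) {}
	static inline void pgdat_resize_unlock(struct pglist_data *p,
					       unsigned long *f) {}
	#endif
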
@@ -1111,6 +1137,10 @@ struct mem_section {
 	struct page_cgroup *page_cgroup;
 	unsigned long pad;
 #endif
+	/*
+	 * WARNING: mem_section must be a power-of-2 in size for the
+	 * calculation and use of SECTION_ROOT_MASK to make sense.
+	 */
 };
 
 #ifdef CONFIG_SPARSEMEM_EXTREME
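
The warning matters because sparsemem splits a section number into a root index and an offset with mask arithmetic built from the structure's size. With the definitions from this header (only the explanatory comment and the worked numbers below are additions):

	#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof(struct mem_section))
	#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
	#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

	/* (sec) & SECTION_ROOT_MASK equals (sec) % SECTIONS_PER_ROOT only
	 * when SECTIONS_PER_ROOT is a power of two, i.e. only when
	 * sizeof(struct mem_section) is itself a power of two.  Example:
	 * PAGE_SIZE of 4096 and a 16-byte mem_section give 256 sections
	 * per root and a mask of 0xff. */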