Diffstat (limited to 'include/linux/mmzone.h')
 include/linux/mmzone.h | 63 +++++++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 47 insertions(+), 16 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index a23923ba826..4bec5be82ca 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -63,10 +63,8 @@ enum {
 
 #ifdef CONFIG_CMA
 # define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
-# define cma_wmark_pages(zone)	zone->min_cma_pages
 #else
 # define is_migrate_cma(migratetype) false
-# define cma_wmark_pages(zone) 0
 #endif
 
 #define for_each_migratetype_order(order, type) \
@@ -383,13 +381,6 @@ struct zone {
 	/* see spanned/present_pages for more description */
 	seqlock_t		span_seqlock;
 #endif
-#ifdef CONFIG_CMA
-	/*
-	 * CMA needs to increase watermark levels during the allocation
-	 * process to make sure that the system is not starved.
-	 */
-	unsigned long		min_cma_pages;
-#endif
 	struct free_area	free_area[MAX_ORDER];
 
 #ifndef CONFIG_SPARSEMEM
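
The two hunks above drop the CMA watermark hack: cma_wmark_pages(zone) used to expose zone->min_cma_pages so the allocator could temporarily raise its watermarks while CMA allocations were in flight, per the removed comment. A rough sketch of the pattern being removed follows; the function name and call shape are illustrative assumptions, not a quote from mm/page_alloc.c.

/*
 * Illustrative sketch only: a watermark-style check that adds the CMA
 * reservation on top of the normal minimum. zone_watermark_ok_sketch()
 * is hypothetical; only cma_wmark_pages() and zone->min_cma_pages come
 * from the (now deleted) code.
 */
static bool zone_watermark_ok_sketch(struct zone *zone,
				     unsigned long free_pages,
				     unsigned long mark)
{
	unsigned long min = mark;

	/* CMA temporarily raised the bar via zone->min_cma_pages */
	min += cma_wmark_pages(zone);

	return free_pages >= min;
}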
@@ -469,17 +460,44 @@ struct zone {
 	unsigned long		zone_start_pfn;
 
 	/*
-	 * zone_start_pfn, spanned_pages and present_pages are all
-	 * protected by span_seqlock.  It is a seqlock because it has
-	 * to be read outside of zone->lock, and it is done in the main
-	 * allocator path.  But, it is written quite infrequently.
+	 * spanned_pages is the total pages spanned by the zone, including
+	 * holes, which is calculated as:
+	 * 	spanned_pages = zone_end_pfn - zone_start_pfn;
+	 *
+	 * present_pages is physical pages existing within the zone, which
+	 * is calculated as:
+	 *	present_pages = spanned_pages - absent_pages(pages in holes);
+	 *
+	 * managed_pages is present pages managed by the buddy system, which
+	 * is calculated as (reserved_pages includes pages allocated by the
+	 * bootmem allocator):
+	 *	managed_pages = present_pages - reserved_pages;
+	 *
+	 * So present_pages may be used by memory hotplug or memory power
+	 * management logic to figure out unmanaged pages by checking
+	 * (present_pages - managed_pages). And managed_pages should be used
+	 * by the page allocator and vm scanner to calculate all kinds of
+	 * watermarks and thresholds.
+	 *
+	 * Locking rules:
 	 *
-	 * The lock is declared along with zone->lock because it is
+	 * zone_start_pfn and spanned_pages are protected by span_seqlock.
+	 * It is a seqlock because it has to be read outside of zone->lock,
+	 * and it is done in the main allocator path.  But, it is written
+	 * quite infrequently.
+	 *
+	 * The span_seq lock is declared along with zone->lock because it is
 	 * frequently read in proximity to zone->lock.  It's good to
 	 * give them a chance of being in the same cacheline.
+	 *
+	 * Write access to present_pages and managed_pages at runtime should
+	 * be protected by lock_memory_hotplug()/unlock_memory_hotplug().
+	 * Any reader who can't tolerate drift of present_pages and
+	 * managed_pages should hold the memory hotplug lock to get a stable value.
 	 */
-	unsigned long		spanned_pages;	/* total size, including holes */
-	unsigned long		present_pages;	/* amount of memory (excluding holes) */
+	unsigned long		spanned_pages;
+	unsigned long		present_pages;
+	unsigned long		managed_pages;
 
 	/*
 	 * rarely used fields:
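
The replacement comment spells out how the three page counters relate and which lock stabilises them. A short sketch of those relationships follows; the helper functions are hypothetical, while the struct zone fields and lock_memory_hotplug()/unlock_memory_hotplug() are the ones named in the patch text.

/* Pages covered by the zone's PFN span that have no backing memory. */
static unsigned long zone_hole_pages_sketch(const struct zone *z)
{
	return z->spanned_pages - z->present_pages;
}

/* Present pages never handed to the buddy allocator (e.g. bootmem-reserved). */
static unsigned long zone_unmanaged_pages_sketch(const struct zone *z)
{
	return z->present_pages - z->managed_pages;
}

/* A reader that cannot tolerate drift takes the memory hotplug lock. */
static unsigned long zone_stable_present_pages_sketch(const struct zone *z)
{
	unsigned long present;

	lock_memory_hotplug();
	present = z->present_pages;
	unlock_memory_hotplug();

	return present;
}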
@@ -717,6 +735,19 @@ typedef struct pglist_data {
 	struct task_struct *kswapd;	/* Protected by lock_memory_hotplug() */
 	int kswapd_max_order;
 	enum zone_type classzone_idx;
+#ifdef CONFIG_NUMA_BALANCING
+	/*
+	 * Lock serializing the per destination node AutoNUMA memory
+	 * migration rate limiting data.
+	 */
+	spinlock_t numabalancing_migrate_lock;
+
+	/* Rate limiting time interval */
+	unsigned long numabalancing_migrate_next_window;
+
+	/* Number of pages migrated during the rate limiting time interval */
+	unsigned long numabalancing_migrate_nr_pages;
+#endif
 } pg_data_t;
 
 #define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
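
The new pg_data_t fields implement per-node rate limiting of NUMA-balancing migrations: a spinlock guards a window deadline and a page counter for the destination node. A minimal sketch of how such a limiter could use them is below; the window length, page budget, and function name are illustrative assumptions, and the real consumer lives elsewhere in mm/ and is not part of this diff.

#define SKETCH_MIGRATE_WINDOW	HZ	/* hypothetical window length */
#define SKETCH_MIGRATE_BUDGET	1024	/* hypothetical pages per window */

static bool numa_migrate_allowed_sketch(pg_data_t *pgdat, unsigned long nr_pages)
{
	bool allowed;

	spin_lock(&pgdat->numabalancing_migrate_lock);

	/* Open a fresh window once the previous one has elapsed. */
	if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) {
		pgdat->numabalancing_migrate_nr_pages = 0;
		pgdat->numabalancing_migrate_next_window =
			jiffies + SKETCH_MIGRATE_WINDOW;
	}

	/* Charge this migration against the per-window budget. */
	pgdat->numabalancing_migrate_nr_pages += nr_pages;
	allowed = pgdat->numabalancing_migrate_nr_pages <= SKETCH_MIGRATE_BUDGET;

	spin_unlock(&pgdat->numabalancing_migrate_lock);
	return allowed;
}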