Diffstat (limited to 'include/linux/mmzone.h')
 include/linux/mmzone.h | 41 ++++++++++++++++++++++++++++++++++-------
 1 file changed, 34 insertions(+), 7 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 0c0b1d608a69..cd55dad56aac 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -460,17 +460,44 @@ struct zone {
 	unsigned long		zone_start_pfn;
 
 	/*
-	 * zone_start_pfn, spanned_pages and present_pages are all
-	 * protected by span_seqlock.  It is a seqlock because it has
-	 * to be read outside of zone->lock, and it is done in the main
-	 * allocator path.  But, it is written quite infrequently.
+	 * spanned_pages is the total pages spanned by the zone, including
+	 * holes, which is calculated as:
+	 *	spanned_pages = zone_end_pfn - zone_start_pfn;
 	 *
-	 * The lock is declared along with zone->lock because it is
+	 * present_pages is physical pages existing within the zone, which
+	 * is calculated as:
+	 *	present_pages = spanned_pages - absent_pages(pages in holes);
+	 *
+	 * managed_pages is present pages managed by the buddy system, which
+	 * is calculated as (reserved_pages includes pages allocated by the
+	 * bootmem allocator):
+	 *	managed_pages = present_pages - reserved_pages;
+	 *
+	 * So present_pages may be used by memory hotplug or memory power
+	 * management logic to figure out unmanaged pages by checking
+	 * (present_pages - managed_pages). And managed_pages should be used
+	 * by the page allocator and vm scanner to calculate all kinds of
+	 * watermarks and thresholds.
+	 *
+	 * Locking rules:
+	 *
+	 * zone_start_pfn and spanned_pages are protected by span_seqlock.
+	 * It is a seqlock because it has to be read outside of zone->lock,
+	 * and it is done in the main allocator path.  But, it is written
+	 * quite infrequently.
+	 *
+	 * The span_seqlock is declared along with zone->lock because it is
 	 * frequently read in proximity to zone->lock.  It's good to
 	 * give them a chance of being in the same cacheline.
+	 *
+	 * Write access to present_pages and managed_pages at runtime should
+	 * be protected by lock_memory_hotplug()/unlock_memory_hotplug().
+	 * Any reader who can't tolerate drift of present_pages and
+	 * managed_pages should hold the memory hotplug lock for a stable value.
 	 */
-	unsigned long		spanned_pages;	/* total size, including holes */
-	unsigned long		present_pages;	/* amount of memory (excluding holes) */
+	unsigned long		spanned_pages;
+	unsigned long		present_pages;
+	unsigned long		managed_pages;
 
 	/*
 	 * rarely used fields:
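The comment added by this patch encodes three invariants (spanned_pages = zone_end_pfn - zone_start_pfn, present_pages = spanned_pages minus pages in holes, managed_pages = present_pages minus reserved pages) and two locking rules. Below is a minimal sketch, not part of the patch, of how a reader is expected to honor those rules. It assumes the zone_span_seqbegin()/zone_span_seqretry() wrappers around zone->span_seqlock from include/linux/memory_hotplug.h and the lock_memory_hotplug()/unlock_memory_hotplug() pair named in the comment; zone_unmanaged_pages() is a hypothetical helper invented here for illustration.

#include <linux/mmzone.h>
#include <linux/memory_hotplug.h>

/* Stable snapshot of the zone span, readable outside zone->lock. */
static unsigned long zone_spanned_pages_stable(struct zone *zone)
{
	unsigned long spanned;
	unsigned seq;

	do {
		seq = zone_span_seqbegin(zone);
		spanned = zone->spanned_pages;	/* may be resized by hotplug */
	} while (zone_span_seqretry(zone, seq));

	return spanned;
}

/*
 * Hypothetical helper: pages present in the zone but never handed to
 * the buddy allocator.  Per the locking rules above, a caller that
 * cannot tolerate drift of present_pages/managed_pages must hold the
 * memory hotplug lock around the read.
 */
static unsigned long zone_unmanaged_pages(struct zone *zone)
{
	unsigned long unmanaged;

	lock_memory_hotplug();
	unmanaged = zone->present_pages - zone->managed_pages;
	unlock_memory_hotplug();

	return unmanaged;
}

The seqlock suits the first rule because the span is read on the hot allocator path but resized only rarely: readers never block the writer and simply retry if a hotplug resize raced with them, and keeping span_seqlock next to zone->lock gives both a chance of sharing a cacheline.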