Diffstat (limited to 'include/linux/mmzone.h')
-rw-r--r--  include/linux/mmzone.h  12
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 67f2e3c38939..7522a6987595 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1166,8 +1166,16 @@ extern unsigned long usemap_size(void);
 
 /*
  * We use the lower bits of the mem_map pointer to store
- * a little bit of information. There should be at least
- * 3 bits here due to 32-bit alignment.
+ * a little bit of information. The pointer is calculated
+ * as mem_map - section_nr_to_pfn(pnum). The result is
+ * aligned to the minimum alignment of the two values:
+ *   1. All mem_map arrays are page-aligned.
+ *   2. section_nr_to_pfn() always clears PFN_SECTION_SHIFT
+ *      lowest bits. PFN_SECTION_SHIFT is arch-specific
+ *      (equal SECTION_SIZE_BITS - PAGE_SHIFT), and the
+ *      worst combination is powerpc with 256k pages,
+ *      which results in PFN_SECTION_SHIFT equal 6.
+ * To sum it up, at least 6 bits are available.
  */
 #define SECTION_MARKED_PRESENT	(1UL<<0)
 #define SECTION_HAS_MEM_MAP	(1UL<<1)
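
For reference, a minimal stand-alone sketch of the scheme the new comment describes: flags packed into the guaranteed-zero low bits of the encoded mem_map value, plus the worst-case PFN_SECTION_SHIFT arithmetic. The helper names encode_section_mem_map()/decode_section_mem_map(), the SECTION_MAP_LAST_BIT/SECTION_MAP_MASK values and the powerpc PAGE_SHIFT/SECTION_SIZE_BITS figures are illustrative assumptions; only the two flag definitions come from the diff itself.

#include <assert.h>
#include <stdio.h>

#define SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
/* Illustrative only: the real header defines further flags and a later last bit. */
#define SECTION_MAP_LAST_BIT	(1UL<<2)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT - 1))

/* Pack flags into the low bits of the encoded mem_map value. */
static unsigned long encode_section_mem_map(unsigned long mem_map_off,
					    unsigned long flags)
{
	assert((mem_map_off & ~SECTION_MAP_MASK) == 0);	/* low bits must be free */
	return mem_map_off | flags;
}

/* Strip the flag bits to recover the encoded mem_map value. */
static unsigned long decode_section_mem_map(unsigned long coded)
{
	return coded & SECTION_MAP_MASK;
}

int main(void)
{
	/*
	 * Worst-case arithmetic from the comment: with 256k pages on powerpc,
	 * PAGE_SHIFT is 18 and SECTION_SIZE_BITS is 24 (assumed figures), so
	 * PFN_SECTION_SHIFT = 24 - 18 = 6 low bits are guaranteed to be zero.
	 */
	unsigned long pfn_section_shift = 24 - 18;
	printf("worst-case PFN_SECTION_SHIFT = %lu\n", pfn_section_shift);

	/* Pretend value of mem_map - section_nr_to_pfn(pnum), low bits clear. */
	unsigned long off = 0x100000UL;
	unsigned long coded = encode_section_mem_map(off,
			SECTION_MARKED_PRESENT | SECTION_HAS_MEM_MAP);

	assert(coded & SECTION_MARKED_PRESENT);
	assert(coded & SECTION_HAS_MEM_MAP);
	assert(decode_section_mem_map(coded) == off);
	return 0;
}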