Diffstat (limited to 'include/linux/mmzone.h')
-rw-r--r--  include/linux/mmzone.h  26
1 files changed, 26 insertions, 0 deletions
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 186ec6ab334..a47c879e130 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1097,6 +1097,32 @@ unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
 #define pfn_valid_within(pfn) (1)
 #endif
 
+#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
+/*
+ * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
+ * associated with it or not. In FLATMEM, it is expected that holes always
+ * have valid memmap as long as there are valid PFNs on either side of the
+ * hole. In SPARSEMEM, it is assumed that a valid section has a memmap for
+ * the entire section.
+ *
+ * However, ARM, and maybe other embedded architectures in the future,
+ * free the memmap backing holes to save memory on the assumption the
+ * memmap is never used. The page_zone linkages are then broken even
+ * though pfn_valid() returns true. A walker of the full memmap must then
+ * do this additional check to ensure the memmap it is looking at is sane
+ * by making sure the zone and PFN linkages are still valid. This is
+ * expensive, but walkers of the full memmap are extremely rare.
+ */
+int memmap_valid_within(unsigned long pfn,
+					struct page *page, struct zone *zone);
+#else
+static inline int memmap_valid_within(unsigned long pfn,
+					struct page *page, struct zone *zone)
+{
+	return 1;
+}
+#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
+
 #endif /* !__GENERATING_BOUNDS.H */
 #endif /* !__ASSEMBLY__ */
 #endif /* _LINUX_MMZONE_H */
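
For reference, the calling pattern the new comment describes looks roughly like the sketch below: a walker that steps through a zone a pageblock at a time pairs pfn_valid() with memmap_valid_within() before touching the struct page. This is only an illustrative sketch; walk_zone_pageblocks() and its callback are hypothetical names, not part of this patch, though the per-pfn checks follow the pattern used by existing memmap walkers such as the ones in mm/vmstat.c.

/*
 * Illustrative sketch only: walk_zone_pageblocks() and fn() are hypothetical.
 * On architectures that free the memmap backing holes, pfn_valid() alone is
 * not enough, so the walker also calls memmap_valid_within() before using
 * the page.
 */
#include <linux/mm.h>
#include <linux/mmzone.h>

static void walk_zone_pageblocks(struct zone *zone,
				 void (*fn)(struct page *page))
{
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = start_pfn + zone->spanned_pages;
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);

		/* Watch for unexpected holes punched in the memmap */
		if (!memmap_valid_within(pfn, page, zone))
			continue;

		fn(page);
	}
}

The extra check only costs anything for such full-memmap walkers; ordinary page allocator paths never pass through it, which is why the comment calls the expense acceptable.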