path: root/include/linux/mm.h
author      Tejun Heo <tj@kernel.org>   2011-12-08 13:22:09 -0500
committer   Tejun Heo <tj@kernel.org>   2011-12-08 13:22:09 -0500
commit      0ee332c1451869963626bf9cac88f165a90990e1 (patch)
tree        a40e6c9c6cfe39ecbca37a08019be3c9e56a4a9b /include/linux/mm.h
parent      a2bf79e7dcc97b4e9654f273453f9264f49e41ff (diff)
memblock: Kill early_node_map[]
Now all ARCH_POPULATES_NODE_MAP archs select HAVE_MEMBLOCK_NODE_MAP - there's no user of early_node_map[] left. Kill early_node_map[] and replace ARCH_POPULATES_NODE_MAP with HAVE_MEMBLOCK_NODE_MAP. Also, relocate for_each_mem_pfn_range() and helper from mm.h to memblock.h as page_alloc.c would no longer host an alternative implementation.

This change is ultimately a one-to-one mapping and shouldn't cause any observable difference; however, after the recent changes, there are some functions which now would fit memblock.c better than page_alloc.c and dependency on HAVE_MEMBLOCK_NODE_MAP instead of HAVE_MEMBLOCK doesn't make much sense on some of them. Further cleanups for functions inside HAVE_MEMBLOCK_NODE_MAP in mm.h would be nice.

-v2: Fix compile bug introduced by misspelling CONFIG_HAVE_MEMBLOCK_NODE_MAP as CONFIG_MEMBLOCK_HAVE_NODE_MAP in mmzone.h. Reported by Stephen Rothwell.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Chen Liqin <liqin.chen@sunplusct.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: "H. Peter Anvin" <hpa@zytor.com>
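To make the one-to-one mapping concrete: where an architecture previously filled early_node_map[] via add_active_range(), it now registers the same ranges with memblock, node id attached, and then calls free_area_init_nodes() as before. A minimal sketch, not part of this patch: struct example_range and example_register_memory() are hypothetical stand-ins for an arch's firmware memory table and setup hook, while memblock_add_node(), free_area_init_nodes(), memblock_end_of_DRAM() and PFN_DOWN() are the real interfaces.

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/pfn.h>

struct example_range {			/* hypothetical firmware-provided table */
	phys_addr_t base;		/* start of the physical range */
	phys_addr_t size;		/* length of the range in bytes */
	int nid;			/* NUMA node backing the range */
};

static void __init example_register_memory(const struct example_range *r,
					   int count)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0 };
	int i;

	/*
	 * Formerly add_active_range(nid, start_pfn, end_pfn) into
	 * early_node_map[]; now memblock itself records the node id.
	 */
	for (i = 0; i < count; i++)
		memblock_add_node(r[i].base, r[i].size, r[i].nid);

	/* Zone limits are still passed explicitly, exactly as before. */
	max_zone_pfns[ZONE_NORMAL] = PFN_DOWN(memblock_end_of_DRAM());
	free_area_init_nodes(max_zone_pfns);
}

The point of the change is visible here: the range-to-node association lives in memblock, so page_alloc.c no longer needs early_node_map[] as a second copy of the same data.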
Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--   include/linux/mm.h   50
1 file changed, 11 insertions(+), 39 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6b365aee8396..c6f49bea52a3 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1252,43 +1252,34 @@ static inline void pgtable_page_dtor(struct page *page)
 extern void free_area_init(unsigned long * zones_size);
 extern void free_area_init_node(int nid, unsigned long * zones_size,
 		unsigned long zone_start_pfn, unsigned long *zholes_size);
-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 /*
- * With CONFIG_ARCH_POPULATES_NODE_MAP set, an architecture may initialise its
+ * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
  * zones, allocate the backing mem_map and account for memory holes in a more
  * architecture independent manner. This is a substitute for creating the
  * zone_sizes[] and zholes_size[] arrays and passing them to
  * free_area_init_node()
  *
  * An architecture is expected to register range of page frames backed by
- * physical memory with add_active_range() before calling
+ * physical memory with memblock_add[_node]() before calling
  * free_area_init_nodes() passing in the PFN each zone ends at. At a basic
  * usage, an architecture is expected to do something like
  *
  * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
  * 							 max_highmem_pfn};
  * for_each_valid_physical_page_range()
- * 	add_active_range(node_id, start_pfn, end_pfn)
+ * 	memblock_add_node(base, size, nid)
  * free_area_init_nodes(max_zone_pfns);
  *
- * If the architecture guarantees that there are no holes in the ranges
- * registered with add_active_range(), free_bootmem_active_regions()
- * will call free_bootmem_node() for each registered physical page range.
- * Similarly sparse_memory_present_with_active_regions() calls
- * memory_present() for each range when SPARSEMEM is enabled.
+ * free_bootmem_with_active_regions() calls free_bootmem_node() for each
+ * registered physical page range.  Similarly
+ * sparse_memory_present_with_active_regions() calls memory_present() for
+ * each range when SPARSEMEM is enabled.
  *
  * See mm/page_alloc.c for more information on each function exposed by
- * CONFIG_ARCH_POPULATES_NODE_MAP
+ * CONFIG_HAVE_MEMBLOCK_NODE_MAP.
  */
 extern void free_area_init_nodes(unsigned long *max_zone_pfn);
-#ifndef CONFIG_HAVE_MEMBLOCK_NODE_MAP
-extern void add_active_range(unsigned int nid, unsigned long start_pfn,
-					unsigned long end_pfn);
-extern void remove_active_range(unsigned int nid, unsigned long start_pfn,
-					unsigned long end_pfn);
-extern void remove_all_active_ranges(void);
-void sort_node_map(void);
-#endif
 unsigned long node_map_pfn_alignment(void);
 unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
 						unsigned long end_pfn);
@@ -1303,28 +1294,9 @@ int add_from_early_node_map(struct range *range, int az,
 					int nr_range, int nid);
 extern void sparse_memory_present_with_active_regions(int nid);
 
-extern void __next_mem_pfn_range(int *idx, int nid,
-				 unsigned long *out_start_pfn,
-				 unsigned long *out_end_pfn, int *out_nid);
-
-/**
- * for_each_mem_pfn_range - early memory pfn range iterator
- * @i: an integer used as loop variable
- * @nid: node selector, %MAX_NUMNODES for all nodes
- * @p_start: ptr to ulong for start pfn of the range, can be %NULL
- * @p_end: ptr to ulong for end pfn of the range, can be %NULL
- * @p_nid: ptr to int for nid of the range, can be %NULL
- *
- * Walks over configured memory ranges.  Available after early_node_map is
- * populated.
- */
-#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
-	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
-	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
-
-#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 
-#if !defined(CONFIG_ARCH_POPULATES_NODE_MAP) && \
+#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
     !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
 static inline int __early_pfn_to_nid(unsigned long pfn)
 {
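For reference, the iterator removed from mm.h above keeps the same shape in its new home in memblock.h. A short usage sketch: example_count_pages() is a hypothetical helper, and the macro arguments follow the kernel-doc removed above (pass MAX_NUMNODES as nid to walk every node's ranges).

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/numa.h>

/* Hypothetical: count the pages of early memory on one node, or on
 * all nodes when nid == MAX_NUMNODES. */
static unsigned long __init example_count_pages(int nid)
{
	unsigned long start_pfn, end_pfn, pages = 0;
	int i, range_nid;

	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &range_nid)
		pages += end_pfn - start_pfn;

	return pages;
}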