author    Tejun Heo <tj@kernel.org>    2011-12-08 13:22:09 -0500
committer Tejun Heo <tj@kernel.org>    2011-12-08 13:22:09 -0500
commit    0ee332c1451869963626bf9cac88f165a90990e1 (patch)
tree      a40e6c9c6cfe39ecbca37a08019be3c9e56a4a9b /include/linux
parent    a2bf79e7dcc97b4e9654f273453f9264f49e41ff (diff)
memblock: Kill early_node_map[]
Now that all ARCH_POPULATES_NODE_MAP archs select HAVE_MEMBLOCK_NODE_MAP,
there's no user of early_node_map[] left.  Kill early_node_map[] and
replace ARCH_POPULATES_NODE_MAP with HAVE_MEMBLOCK_NODE_MAP.  Also,
relocate for_each_mem_pfn_range() and its helper from mm.h to memblock.h,
as page_alloc.c no longer hosts an alternative implementation.

This change is ultimately a one-to-one mapping and shouldn't cause any
observable difference; however, after the recent changes, some functions
would now fit memblock.c better than page_alloc.c, and the dependency on
HAVE_MEMBLOCK_NODE_MAP instead of HAVE_MEMBLOCK doesn't make much sense
for some of them.  Further cleanups of the functions inside
HAVE_MEMBLOCK_NODE_MAP in mm.h would be nice.

-v2: Fix compile bug introduced by mis-spelling
     CONFIG_HAVE_MEMBLOCK_NODE_MAP as CONFIG_MEMBLOCK_HAVE_NODE_MAP in
     mmzone.h.  Reported by Stephen Rothwell.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Chen Liqin <liqin.chen@sunplusct.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: "H. Peter Anvin" <hpa@zytor.com>
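For illustration (not part of the patch): with the iterator relocated to
memblock.h, early boot code can walk the registered pfn ranges as in the
sketch below.  The helper name and its pr_info() output are hypothetical.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memblock.h>

/* Hypothetical helper: print every early memory range with its node id. */
static void __init dump_early_pfn_ranges(void)
{
	unsigned long start, end;
	int i, nid;

	/* MAX_NUMNODES selects ranges on all nodes, per the kerneldoc. */
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid)
		pr_info("node %d: pfns [%lx-%lx)\n", nid, start, end);
}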
Diffstat (limited to 'include/linux')
-rw-r--r--   include/linux/memblock.h   23
-rw-r--r--   include/linux/mm.h         50
-rw-r--r--   include/linux/mmzone.h      8
3 files changed, 35 insertions(+), 46 deletions(-)
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index c7b68f489d46..cd7606b71e5a 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -58,6 +58,26 @@ int memblock_remove(phys_addr_t base, phys_addr_t size);
 int memblock_free(phys_addr_t base, phys_addr_t size);
 int memblock_reserve(phys_addr_t base, phys_addr_t size);
 
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
+			  unsigned long *out_end_pfn, int *out_nid);
+
+/**
+ * for_each_mem_pfn_range - early memory pfn range iterator
+ * @i: an integer used as loop variable
+ * @nid: node selector, %MAX_NUMNODES for all nodes
+ * @p_start: ptr to ulong for start pfn of the range, can be %NULL
+ * @p_end: ptr to ulong for end pfn of the range, can be %NULL
+ * @p_nid: ptr to int for nid of the range, can be %NULL
+ *
+ * Walks over configured memory ranges.  Available after early_node_map is
+ * populated.
+ */
+#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
+	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
+	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+
 void __next_free_mem_range(u64 *idx, int nid, phys_addr_t *out_start,
			    phys_addr_t *out_end, int *out_nid);
 
@@ -101,9 +121,6 @@ static inline int memblock_get_region_node(const struct memblock_region *r)
 }
 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 
-/* The numa aware allocator is only available if
- * CONFIG_ARCH_POPULATES_NODE_MAP is set
- */
 phys_addr_t memblock_find_in_range_node(phys_addr_t start, phys_addr_t end,
					 phys_addr_t size, phys_addr_t align, int nid);
 phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
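The two declarations above form the NUMA-aware early allocator whose stale
comment the hunk removes.  A minimal sketch of a caller, assuming a boot-time
context (the helper is hypothetical; a zero return would mean no suitable
free range was found):

#include <linux/init.h>
#include <linux/memblock.h>

/* Hypothetical: reserve one page-sized, page-aligned block near @nid. */
static phys_addr_t __init alloc_early_page_on_node(int nid)
{
	return memblock_alloc_nid(PAGE_SIZE, PAGE_SIZE, nid);
}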
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6b365aee8396..c6f49bea52a3 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1252,43 +1252,34 @@ static inline void pgtable_page_dtor(struct page *page)
 extern void free_area_init(unsigned long * zones_size);
 extern void free_area_init_node(int nid, unsigned long * zones_size,
		unsigned long zone_start_pfn, unsigned long *zholes_size);
-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 /*
- * With CONFIG_ARCH_POPULATES_NODE_MAP set, an architecture may initialise its
+ * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
  * zones, allocate the backing mem_map and account for memory holes in a more
  * architecture independent manner. This is a substitute for creating the
  * zone_sizes[] and zholes_size[] arrays and passing them to
  * free_area_init_node()
  *
  * An architecture is expected to register range of page frames backed by
- * physical memory with add_active_range() before calling
+ * physical memory with memblock_add[_node]() before calling
  * free_area_init_nodes() passing in the PFN each zone ends at. At a basic
  * usage, an architecture is expected to do something like
  *
  * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
  *		max_highmem_pfn};
  * for_each_valid_physical_page_range()
- *	add_active_range(node_id, start_pfn, end_pfn)
+ *	memblock_add_node(base, size, nid)
  * free_area_init_nodes(max_zone_pfns);
  *
- * If the architecture guarantees that there are no holes in the ranges
- * registered with add_active_range(), free_bootmem_active_regions()
- * will call free_bootmem_node() for each registered physical page range.
- * Similarly sparse_memory_present_with_active_regions() calls
- * memory_present() for each range when SPARSEMEM is enabled.
+ * free_bootmem_with_active_regions() calls free_bootmem_node() for each
+ * registered physical page range.  Similarly
+ * sparse_memory_present_with_active_regions() calls memory_present() for
+ * each range when SPARSEMEM is enabled.
  *
  * See mm/page_alloc.c for more information on each function exposed by
- * CONFIG_ARCH_POPULATES_NODE_MAP
+ * CONFIG_HAVE_MEMBLOCK_NODE_MAP.
  */
 extern void free_area_init_nodes(unsigned long *max_zone_pfn);
-#ifndef CONFIG_HAVE_MEMBLOCK_NODE_MAP
-extern void add_active_range(unsigned int nid, unsigned long start_pfn,
-					unsigned long end_pfn);
-extern void remove_active_range(unsigned int nid, unsigned long start_pfn,
-					unsigned long end_pfn);
-extern void remove_all_active_ranges(void);
-void sort_node_map(void);
-#endif
 unsigned long node_map_pfn_alignment(void);
 unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
						unsigned long end_pfn);
@@ -1303,28 +1294,9 @@ int add_from_early_node_map(struct range *range, int az,
			   int nr_range, int nid);
 extern void sparse_memory_present_with_active_regions(int nid);
 
-extern void __next_mem_pfn_range(int *idx, int nid,
-				 unsigned long *out_start_pfn,
-				 unsigned long *out_end_pfn, int *out_nid);
-
-/**
- * for_each_mem_pfn_range - early memory pfn range iterator
- * @i: an integer used as loop variable
- * @nid: node selector, %MAX_NUMNODES for all nodes
- * @p_start: ptr to ulong for start pfn of the range, can be %NULL
- * @p_end: ptr to ulong for end pfn of the range, can be %NULL
- * @p_nid: ptr to int for nid of the range, can be %NULL
- *
- * Walks over configured memory ranges.  Available after early_node_map is
- * populated.
- */
-#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
-	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
-	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
-
-#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 
-#if !defined(CONFIG_ARCH_POPULATES_NODE_MAP) && \
+#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
	!defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
 static inline int __early_pfn_to_nid(unsigned long pfn)
 {
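A hedged sketch of the boot sequence the updated mm.h comment describes;
the single hard-coded RAM bank (1GiB at physical address 0 on node 0), the
zone limit, and the function name are illustrative assumptions, not values
from this commit:

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mm.h>

void __init example_arch_mem_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0 };

	/* Register each RAM bank with its node id; this is the step
	 * that used to be add_active_range(nid, start_pfn, end_pfn). */
	memblock_add_node(0, 1UL << 30, 0);

	/* Hand the generic code the last pfn of each zone. */
	max_zone_pfns[ZONE_NORMAL] = (1UL << 30) >> PAGE_SHIFT;
	free_area_init_nodes(max_zone_pfns);
}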
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 188cb2ffe8db..3ac040f19369 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -598,13 +598,13 @@ struct zonelist {
 #endif
 };
 
-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 struct node_active_region {
	unsigned long start_pfn;
	unsigned long end_pfn;
	int nid;
 };
-#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 
 #ifndef CONFIG_DISCONTIGMEM
 /* The array of struct pages - for discontigmem use pgdat->lmem_map */
@@ -720,7 +720,7 @@ extern int movable_zone;
 
 static inline int zone_movable_is_highmem(void)
 {
-#if defined(CONFIG_HIGHMEM) && defined(CONFIG_ARCH_POPULATES_NODE_MAP)
+#if defined(CONFIG_HIGHMEM) && defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
	return movable_zone == ZONE_HIGHMEM;
 #else
	return 0;
@@ -938,7 +938,7 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
 #endif
 
 #if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
-	!defined(CONFIG_ARCH_POPULATES_NODE_MAP)
+	!defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
 static inline unsigned long early_pfn_to_nid(unsigned long pfn)
 {
	return 0;
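As a note on this fallback path: on configs with neither
CONFIG_HAVE_MEMBLOCK_NODE_MAP nor CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID, the
stub above collapses every early pfn onto node 0.  A trivial illustration
(the wrapper is hypothetical):

#include <linux/mmzone.h>

static unsigned long example_node_of(unsigned long pfn)
{
	return early_pfn_to_nid(pfn);	/* always 0 on such configs */
}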