-rw-r--r--  arch/ia64/mm/init.c      5
-rw-r--r--  arch/s390/mm/vmem.c      3
-rw-r--r--  include/linux/mm.h       3
-rw-r--r--  include/linux/mmzone.h   8
-rw-r--r--  mm/memory_hotplug.c      6
-rw-r--r--  mm/page_alloc.c         25
6 files changed, 34 insertions, 16 deletions
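
For orientation (a summary reconstructed from the hunks below, not text from the commit itself): the patch threads a new enum memmap_context through memmap_init_zone() and init_currently_empty_zone() so that boot-time initialization (MEMMAP_EARLY) and memory hotplug (MEMMAP_HOTPLUG) can be told apart; only the boot-time path keeps the early_pfn_valid()/early_pfn_in_nid() hole checks. The declarations after the patch read as follows:

    /* include/linux/mmzone.h */
    enum memmap_context {
            MEMMAP_EARLY,
            MEMMAP_HOTPLUG,
    };
    extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
                                         unsigned long size,
                                         enum memmap_context context);

    /* include/linux/mm.h */
    extern void memmap_init_zone(unsigned long, int, unsigned long,
                                    unsigned long, enum memmap_context);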
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 1a3d8a2feb94..1373fae7657f 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -543,7 +543,8 @@ virtual_memmap_init (u64 start, u64 end, void *arg)
 
        if (map_start < map_end)
                memmap_init_zone((unsigned long)(map_end - map_start),
-                                args->nid, args->zone, page_to_pfn(map_start));
+                                args->nid, args->zone, page_to_pfn(map_start),
+                                MEMMAP_EARLY);
        return 0;
 }
 
@@ -552,7 +553,7 @@ memmap_init (unsigned long size, int nid, unsigned long zone,
             unsigned long start_pfn)
 {
        if (!vmem_map)
-               memmap_init_zone(size, nid, zone, start_pfn);
+               memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY);
        else {
                struct page *start;
                struct memmap_init_callback_data args;
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 7f2944d3ec2a..cd3d93e8c211 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -61,7 +61,8 @@ void memmap_init(unsigned long size, int nid, unsigned long zone,
 
                if (map_start < map_end)
                        memmap_init_zone((unsigned long)(map_end - map_start),
-                                        nid, zone, page_to_pfn(map_start));
+                                        nid, zone, page_to_pfn(map_start),
+                                        MEMMAP_EARLY);
        }
 }
 
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a17b147c61e7..76912231af41 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -978,7 +978,8 @@ extern int early_pfn_to_nid(unsigned long pfn);
 #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
 extern void set_dma_reserve(unsigned long new_dma_reserve);
-extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long);
+extern void memmap_init_zone(unsigned long, int, unsigned long,
+                               unsigned long, enum memmap_context);
 extern void setup_per_zone_pages_min(void);
 extern void mem_init(void);
 extern void show_mem(void);
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index e339a7345f25..b262f47961fb 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -450,9 +450,13 @@ void build_all_zonelists(void);
 void wakeup_kswapd(struct zone *zone, int order);
 int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
                int classzone_idx, int alloc_flags);
-
+enum memmap_context {
+       MEMMAP_EARLY,
+       MEMMAP_HOTPLUG,
+};
 extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
-                                    unsigned long size);
+                                    unsigned long size,
+                                    enum memmap_context context);
 
 #ifdef CONFIG_HAVE_MEMORY_PRESENT
 void memory_present(int nid, unsigned long start, unsigned long end);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 0c055a090f4d..84279127fcd3 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -67,11 +67,13 @@ static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
        zone_type = zone - pgdat->node_zones;
        if (!populated_zone(zone)) {
                int ret = 0;
-               ret = init_currently_empty_zone(zone, phys_start_pfn, nr_pages);
+               ret = init_currently_empty_zone(zone, phys_start_pfn,
+                                               nr_pages, MEMMAP_HOTPLUG);
                if (ret < 0)
                        return ret;
        }
-       memmap_init_zone(nr_pages, nid, zone_type, phys_start_pfn);
+       memmap_init_zone(nr_pages, nid, zone_type,
+                        phys_start_pfn, MEMMAP_HOTPLUG);
        return 0;
 }
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a49f96b7ea43..fc5b5442e942 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1956,17 +1956,24 @@ static inline unsigned long wait_table_bits(unsigned long size)
  * done. Non-atomic initialization, single-pass.
  */
 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
-               unsigned long start_pfn)
+               unsigned long start_pfn, enum memmap_context context)
 {
        struct page *page;
        unsigned long end_pfn = start_pfn + size;
        unsigned long pfn;
 
        for (pfn = start_pfn; pfn < end_pfn; pfn++) {
-               if (!early_pfn_valid(pfn))
-                       continue;
-               if (!early_pfn_in_nid(pfn, nid))
-                       continue;
+               /*
+                * There can be holes in boot-time mem_map[]s
+                * handed to this function. They do not
+                * exist on hotplugged memory.
+                */
+               if (context == MEMMAP_EARLY) {
+                       if (!early_pfn_valid(pfn))
+                               continue;
+                       if (!early_pfn_in_nid(pfn, nid))
+                               continue;
+               }
                page = pfn_to_page(pfn);
                set_page_links(page, zone, nid, pfn);
                init_page_count(page);
@@ -1993,7 +2000,7 @@ void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone,
 
 #ifndef __HAVE_ARCH_MEMMAP_INIT
 #define memmap_init(size, nid, zone, start_pfn) \
-       memmap_init_zone((size), (nid), (zone), (start_pfn))
+       memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
 #endif
 
 static int __cpuinit zone_batchsize(struct zone *zone)
@@ -2239,7 +2246,8 @@ static __meminit void zone_pcp_init(struct zone *zone)
 
 __meminit int init_currently_empty_zone(struct zone *zone,
                                        unsigned long zone_start_pfn,
-                                       unsigned long size)
+                                       unsigned long size,
+                                       enum memmap_context context)
 {
        struct pglist_data *pgdat = zone->zone_pgdat;
        int ret;
@@ -2683,7 +2691,8 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
                if (!size)
                        continue;
 
-               ret = init_currently_empty_zone(zone, zone_start_pfn, size);
+               ret = init_currently_empty_zone(zone, zone_start_pfn,
+                                               size, MEMMAP_EARLY);
                BUG_ON(ret);
                zone_start_pfn += size;
        }
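
To make the intended usage concrete, here is a simplified sketch (not code from this commit; sketch_boot_init() and sketch_hotplug_init() are illustrative names) of how the two initialization paths pass the new context argument after the patch:

    #include <linux/init.h>
    #include <linux/mm.h>
    #include <linux/mmzone.h>

    /*
     * Boot-time path (cf. free_area_init_core() and the memmap_init()
     * macro): boot mem_map[]s may contain holes, so MEMMAP_EARLY keeps
     * the early_pfn_valid()/early_pfn_in_nid() checks in
     * memmap_init_zone().
     */
    static int __meminit sketch_boot_init(struct zone *zone, int nid,
                                          unsigned long zone_idx,
                                          unsigned long start_pfn,
                                          unsigned long nr_pages)
    {
            int ret;

            ret = init_currently_empty_zone(zone, start_pfn, nr_pages,
                                            MEMMAP_EARLY);
            if (ret)
                    return ret;
            memmap_init_zone(nr_pages, nid, zone_idx, start_pfn, MEMMAP_EARLY);
            return 0;
    }

    /*
     * Memory-hotplug path (cf. __add_zone()): a hotplugged range has no
     * such holes, so MEMMAP_HOTPLUG lets memmap_init_zone() skip those
     * checks.
     */
    static int __meminit sketch_hotplug_init(struct zone *zone, int nid,
                                             unsigned long zone_idx,
                                             unsigned long start_pfn,
                                             unsigned long nr_pages)
    {
            int ret;

            ret = init_currently_empty_zone(zone, start_pfn, nr_pages,
                                            MEMMAP_HOTPLUG);
            if (ret < 0)
                    return ret;
            memmap_init_zone(nr_pages, nid, zone_idx, start_pfn, MEMMAP_HOTPLUG);
            return 0;
    }

Passing the context explicitly keeps the boot-time hole checks out of the hotplug path without duplicating the pfn loop in memmap_init_zone().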