aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--include/linux/memory_hotplug.h22
-rw-r--r--include/linux/mmzone.h35
-rw-r--r--mm/memory_hotplug.c4
-rw-r--r--mm/page_alloc.c5
-rw-r--r--mm/sparse.c45
5 files changed, 103 insertions, 8 deletions
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 3c8cf86201c3..a61aede1b391 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -14,6 +14,20 @@ struct memory_block;
14struct resource; 14struct resource;
15 15
16#ifdef CONFIG_MEMORY_HOTPLUG 16#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Return the struct page for @pfn only when its section is online. PFN
 * walkers that depend on fully initialized page->flags (and friends)
 * should use this rather than a bare pfn_valid() && pfn_to_page().
 */
#define pfn_to_online_page(pfn)					   \
({								   \
	struct page *___p = NULL;				   \
	unsigned long ___sec = pfn_to_section_nr(pfn);		   \
								   \
	if (___sec < NR_MEM_SECTIONS && online_section_nr(___sec)) \
		___p = pfn_to_page(pfn);			   \
	___p;							   \
})
17 31
18/* 32/*
19 * Types for free bootmem stored in page->lru.next. These have to be in 33 * Types for free bootmem stored in page->lru.next. These have to be in
@@ -203,6 +217,14 @@ extern void set_zone_contiguous(struct zone *zone);
203extern void clear_zone_contiguous(struct zone *zone); 217extern void clear_zone_contiguous(struct zone *zone);
204 218
205#else /* ! CONFIG_MEMORY_HOTPLUG */ 219#else /* ! CONFIG_MEMORY_HOTPLUG */
/* Without memory hotplug every valid pfn is treated as online. */
#define pfn_to_online_page(pfn)			\
({						\
	struct page *___p = NULL;		\
	if (pfn_valid(pfn))			\
		___p = pfn_to_page(pfn);	\
	___p;					\
})
227
206/* 228/*
207 * Stub functions for when hotplug is off 229 * Stub functions for when hotplug is off
208 */ 230 */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 976a1202bec1..2aaf7e08c5a8 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1144,9 +1144,10 @@ extern unsigned long usemap_size(void);
1144 */ 1144 */
1145#define SECTION_MARKED_PRESENT (1UL<<0) 1145#define SECTION_MARKED_PRESENT (1UL<<0)
1146#define SECTION_HAS_MEM_MAP (1UL<<1) 1146#define SECTION_HAS_MEM_MAP (1UL<<1)
1147#define SECTION_MAP_LAST_BIT (1UL<<2) 1147#define SECTION_IS_ONLINE (1UL<<2)
1148#define SECTION_MAP_LAST_BIT (1UL<<3)
1148#define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1)) 1149#define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1))
1149#define SECTION_NID_SHIFT 2 1150#define SECTION_NID_SHIFT 3
1150 1151
1151static inline struct page *__section_mem_map_addr(struct mem_section *section) 1152static inline struct page *__section_mem_map_addr(struct mem_section *section)
1152{ 1153{
@@ -1175,6 +1176,23 @@ static inline int valid_section_nr(unsigned long nr)
1175 return valid_section(__nr_to_section(nr)); 1176 return valid_section(__nr_to_section(nr));
1176} 1177}
1177 1178
1179static inline int online_section(struct mem_section *section)
1180{
1181 return (section && (section->section_mem_map & SECTION_IS_ONLINE));
1182}
1183
/* Section-number based convenience wrapper around online_section(). */
static inline int online_section_nr(unsigned long nr)
{
	return online_section(__nr_to_section(nr));
}
1188
1189#ifdef CONFIG_MEMORY_HOTPLUG
1190void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
1191#ifdef CONFIG_MEMORY_HOTREMOVE
1192void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
1193#endif
1194#endif
1195
1178static inline struct mem_section *__pfn_to_section(unsigned long pfn) 1196static inline struct mem_section *__pfn_to_section(unsigned long pfn)
1179{ 1197{
1180 return __nr_to_section(pfn_to_section_nr(pfn)); 1198 return __nr_to_section(pfn_to_section_nr(pfn));
@@ -1253,10 +1271,15 @@ unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
1253#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL 1271#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
1254/* 1272/*
1255 * pfn_valid() is meant to be able to tell if a given PFN has valid memmap 1273 * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
1256 * associated with it or not. In FLATMEM, it is expected that holes always 1274 * associated with it or not. This means that a struct page exists for this
1257 * have valid memmap as long as there is valid PFNs either side of the hole. 1275 * pfn. The caller cannot assume the page is fully initialized in general.
1258 * In SPARSEMEM, it is assumed that a valid section has a memmap for the 1276 * Hotplugable pages might not have been onlined yet. pfn_to_online_page()
1259 * entire section. 1277 * will ensure the struct page is fully online and initialized. Special pages
1278 * (e.g. ZONE_DEVICE) are never onlined and should be treated accordingly.
1279 *
1280 * In FLATMEM, it is expected that holes always have valid memmap as long as
1281 * there are valid PFNs either side of the hole. In SPARSEMEM, it is assumed
1282 * that a valid section has a memmap for the entire section.
1260 * 1283 *
1261 * However, an ARM, and maybe other embedded architectures in the future 1284 * However, an ARM, and maybe other embedded architectures in the future
1262 * free memmap backing holes to save memory on the assumption the memmap is 1285 * free memmap backing holes to save memory on the assumption the memmap is
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index caa58338d121..b2ebe9ad7f6c 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -929,12 +929,16 @@ static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
929 unsigned long i; 929 unsigned long i;
930 unsigned long onlined_pages = *(unsigned long *)arg; 930 unsigned long onlined_pages = *(unsigned long *)arg;
931 struct page *page; 931 struct page *page;
932
932 if (PageReserved(pfn_to_page(start_pfn))) 933 if (PageReserved(pfn_to_page(start_pfn)))
933 for (i = 0; i < nr_pages; i++) { 934 for (i = 0; i < nr_pages; i++) {
934 page = pfn_to_page(start_pfn + i); 935 page = pfn_to_page(start_pfn + i);
935 (*online_page_callback)(page); 936 (*online_page_callback)(page);
936 onlined_pages++; 937 onlined_pages++;
937 } 938 }
939
940 online_mem_sections(start_pfn, start_pfn + nr_pages);
941
938 *(unsigned long *)arg = onlined_pages; 942 *(unsigned long *)arg = onlined_pages;
939 return 0; 943 return 0;
940} 944}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 73f854344735..387f20db217c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1365,7 +1365,9 @@ struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
1365 if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn)) 1365 if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
1366 return NULL; 1366 return NULL;
1367 1367
1368 start_page = pfn_to_page(start_pfn); 1368 start_page = pfn_to_online_page(start_pfn);
1369 if (!start_page)
1370 return NULL;
1369 1371
1370 if (page_zone(start_page) != zone) 1372 if (page_zone(start_page) != zone)
1371 return NULL; 1373 return NULL;
@@ -7656,6 +7658,7 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
7656 break; 7658 break;
7657 if (pfn == end_pfn) 7659 if (pfn == end_pfn)
7658 return; 7660 return;
7661 offline_mem_sections(pfn, end_pfn);
7659 zone = page_zone(pfn_to_page(pfn)); 7662 zone = page_zone(pfn_to_page(pfn));
7660 spin_lock_irqsave(&zone->lock, flags); 7663 spin_lock_irqsave(&zone->lock, flags);
7661 pfn = start_pfn; 7664 pfn = start_pfn;
diff --git a/mm/sparse.c b/mm/sparse.c
index 5032c9a619de..9d7fd666015e 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -222,7 +222,8 @@ void __init memory_present(int nid, unsigned long start, unsigned long end)
222 222
223 ms = __nr_to_section(section); 223 ms = __nr_to_section(section);
224 if (!ms->section_mem_map) { 224 if (!ms->section_mem_map) {
225 ms->section_mem_map = sparse_encode_early_nid(nid); 225 ms->section_mem_map = sparse_encode_early_nid(nid) |
226 SECTION_IS_ONLINE;
226 section_mark_present(ms); 227 section_mark_present(ms);
227 } 228 }
228 } 229 }
@@ -622,6 +623,48 @@ void __init sparse_init(void)
622} 623}
623 624
624#ifdef CONFIG_MEMORY_HOTPLUG 625#ifdef CONFIG_MEMORY_HOTPLUG
626
627/* Mark all memory sections within the pfn range as online */
628void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
629{
630 unsigned long pfn;
631
632 for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
633 unsigned long section_nr = pfn_to_section_nr(start_pfn);
634 struct mem_section *ms;
635
636 /* onlining code should never touch invalid ranges */
637 if (WARN_ON(!valid_section_nr(section_nr)))
638 continue;
639
640 ms = __nr_to_section(section_nr);
641 ms->section_mem_map |= SECTION_IS_ONLINE;
642 }
643}
644
645#ifdef CONFIG_MEMORY_HOTREMOVE
646/* Mark all memory sections within the pfn range as online */
647void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
648{
649 unsigned long pfn;
650
651 for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
652 unsigned long section_nr = pfn_to_section_nr(start_pfn);
653 struct mem_section *ms;
654
655 /*
656 * TODO this needs some double checking. Offlining code makes
657 * sure to check pfn_valid but those checks might be just bogus
658 */
659 if (WARN_ON(!valid_section_nr(section_nr)))
660 continue;
661
662 ms = __nr_to_section(section_nr);
663 ms->section_mem_map &= ~SECTION_IS_ONLINE;
664 }
665}
666#endif
667
625#ifdef CONFIG_SPARSEMEM_VMEMMAP 668#ifdef CONFIG_SPARSEMEM_VMEMMAP
626static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid) 669static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
627{ 670{