aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/base/memory.c
diff options
context:
space:
mode:
authorPavel Tatashin <pasha.tatashin@oracle.com>2018-04-05 19:22:52 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2018-04-06 00:36:25 -0400
commitb77eab7079d9e477489d2416cceda05d3c1cf21f (patch)
treedb51854b65b46eb9010567db52a5fd7fa9b73be4 /drivers/base/memory.c
parentf165b378bbdf6c8afd950060fc3cbc935bb890c6 (diff)
mm/memory_hotplug: optimize probe routine
When memory is hotplugged pages_correctly_reserved() is called to verify that the added memory is present, this routine traverses through every struct page and verifies that PageReserved() is set. This is a slow operation especially if a large amount of memory is added. Instead of checking every page, it is enough to simply check that the section is present, has mapping (struct page array is allocated), and the mapping is online. In addition, we should not expect that probe routine sets flags in struct page, as the struct pages have not yet been initialized. The initialization should be done in __init_single_page(), the same as during boot. Link: http://lkml.kernel.org/r/20180215165920.8570-5-pasha.tatashin@oracle.com Signed-off-by: Pavel Tatashin <pasha.tatashin@oracle.com> Acked-by: Michal Hocko <mhocko@suse.com> Cc: Ingo Molnar <mingo@kernel.org> Cc: Baoquan He <bhe@redhat.com> Cc: Bharata B Rao <bharata@linux.vnet.ibm.com> Cc: Daniel Jordan <daniel.m.jordan@oracle.com> Cc: Dan Williams <dan.j.williams@intel.com> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Cc: Mel Gorman <mgorman@techsingularity.net> Cc: Steven Sistare <steven.sistare@oracle.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Vlastimil Babka <vbabka@suse.cz> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers/base/memory.c')
-rw-r--r--drivers/base/memory.c36
1 files changed, 20 insertions, 16 deletions
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index fe4b24f05f6a..deb3f029b451 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -187,13 +187,14 @@ int memory_isolate_notify(unsigned long val, void *v)
187} 187}
188 188
189/* 189/*
190 * The probe routines leave the pages reserved, just as the bootmem code does. 190 * The probe routines leave the pages uninitialized, just as the bootmem code
191 * Make sure they're still that way. 191 * does. Make sure we do not access them, but instead use only information from
192 * within sections.
192 */ 193 */
193static bool pages_correctly_reserved(unsigned long start_pfn) 194static bool pages_correctly_probed(unsigned long start_pfn)
194{ 195{
195 int i, j; 196 unsigned long section_nr = pfn_to_section_nr(start_pfn);
196 struct page *page; 197 unsigned long section_nr_end = section_nr + sections_per_block;
197 unsigned long pfn = start_pfn; 198 unsigned long pfn = start_pfn;
198 199
199 /* 200 /*
@@ -201,21 +202,24 @@ static bool pages_correctly_reserved(unsigned long start_pfn)
201 * SPARSEMEM_VMEMMAP. We lookup the page once per section 202 * SPARSEMEM_VMEMMAP. We lookup the page once per section
202 * and assume memmap is contiguous within each section 203 * and assume memmap is contiguous within each section
203 */ 204 */
204 for (i = 0; i < sections_per_block; i++, pfn += PAGES_PER_SECTION) { 205 for (; section_nr < section_nr_end; section_nr++) {
205 if (WARN_ON_ONCE(!pfn_valid(pfn))) 206 if (WARN_ON_ONCE(!pfn_valid(pfn)))
206 return false; 207 return false;
207 page = pfn_to_page(pfn);
208
209 for (j = 0; j < PAGES_PER_SECTION; j++) {
210 if (PageReserved(page + j))
211 continue;
212
213 printk(KERN_WARNING "section number %ld page number %d "
214 "not reserved, was it already online?\n",
215 pfn_to_section_nr(pfn), j);
216 208
209 if (!present_section_nr(section_nr)) {
210 pr_warn("section %ld pfn[%lx, %lx) not present",
211 section_nr, pfn, pfn + PAGES_PER_SECTION);
212 return false;
213 } else if (!valid_section_nr(section_nr)) {
214 pr_warn("section %ld pfn[%lx, %lx) no valid memmap",
215 section_nr, pfn, pfn + PAGES_PER_SECTION);
216 return false;
217 } else if (online_section_nr(section_nr)) {
218 pr_warn("section %ld pfn[%lx, %lx) is already online",
219 section_nr, pfn, pfn + PAGES_PER_SECTION);
217 return false; 220 return false;
218 } 221 }
222 pfn += PAGES_PER_SECTION;
219 } 223 }
220 224
221 return true; 225 return true;
@@ -237,7 +241,7 @@ memory_block_action(unsigned long phys_index, unsigned long action, int online_t
237 241
238 switch (action) { 242 switch (action) {
239 case MEM_ONLINE: 243 case MEM_ONLINE:
240 if (!pages_correctly_reserved(start_pfn)) 244 if (!pages_correctly_probed(start_pfn))
241 return -EBUSY; 245 return -EBUSY;
242 246
243 ret = online_pages(start_pfn, nr_pages, online_type); 247 ret = online_pages(start_pfn, nr_pages, online_type);