author	Johannes Weiner <hannes@cmpxchg.org>	2013-04-29 18:07:56 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-04-29 18:54:35 -0400
commit	8e2cdbcb86b0abefc3d07922c48edb01fece3c56 (patch)
tree	4d666e779a1c6954313ea50abcadae20bb8ff230 /arch/x86/mm
parent	e8216da5c719c3bfec12779b6faf456009f01c44 (diff)
x86-64: fall back to regular page vmemmap on allocation failure
Memory hotplug can happen on a machine that is under load, short on memory, and fragmented, so huge page allocations for the vmemmap are not guaranteed to succeed. Try to fall back to regular pages before failing the hotplug event completely.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Ben Hutchings <ben@decadent.org.uk>
Cc: Bernhard Schmidt <Bernhard.Schmidt@lrz.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: David Miller <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
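Condensed from the diff below, the loop body now takes one of three paths per PMD-sized step. This is a simplified sketch of the new logic, not the verbatim kernel code (the contiguous-block debug bookkeeping is omitted):

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		/* try to back this step with one PMD_SIZE (2MB) block first */
		void *p = vmemmap_alloc_block_buf(PMD_SIZE, node);
		if (p) {
			set_pmd(pmd, __pmd(pte_val(pfn_pte(__pa(p) >> PAGE_SHIFT,
							   PAGE_KERNEL_LARGE))));
			continue;	/* huge mapping installed, next step */
		}
	} else if (pmd_large(*pmd)) {
		/* already backed by a huge page, just sanity-check it */
		vmemmap_verify((pte_t *)pmd, node, addr, next);
		continue;
	}
	/* huge allocation failed, or the PMD already holds a page table:
	 * back the range with regular 4k pages instead */
	pr_warn_once("vmemmap: falling back to regular page backing\n");
	if (vmemmap_populate_basepages(addr, next, node))
		return -ENOMEM;

The key change is that a failed huge page allocation no longer returns -ENOMEM immediately; the hotplug event fails only if the regular-page fallback cannot be satisfied either.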
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--	arch/x86/mm/init_64.c	46
1 file changed, 26 insertions(+), 20 deletions(-)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 9f6347c468b0..71ff55a1b287 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1303,31 +1303,37 @@ static int __meminit vmemmap_populate_hugepages(unsigned long start,
 
 		pmd = pmd_offset(pud, addr);
 		if (pmd_none(*pmd)) {
-			pte_t entry;
 			void *p;
 
 			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
-			if (!p)
-				return -ENOMEM;
-
-			entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
-					PAGE_KERNEL_LARGE);
-			set_pmd(pmd, __pmd(pte_val(entry)));
-
-			/* check to see if we have contiguous blocks */
-			if (p_end != p || node_start != node) {
-				if (p_start)
-					printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
-					       addr_start, addr_end-1, p_start, p_end-1, node_start);
-				addr_start = addr;
-				node_start = node;
-				p_start = p;
-			}
+			if (p) {
+				pte_t entry;
+
+				entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
+						PAGE_KERNEL_LARGE);
+				set_pmd(pmd, __pmd(pte_val(entry)));
+
+				/* check to see if we have contiguous blocks */
+				if (p_end != p || node_start != node) {
+					if (p_start)
+						printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
+						       addr_start, addr_end-1, p_start, p_end-1, node_start);
+					addr_start = addr;
+					node_start = node;
+					p_start = p;
+				}
 
-			addr_end = addr + PMD_SIZE;
-			p_end = p + PMD_SIZE;
-		} else
+				addr_end = addr + PMD_SIZE;
+				p_end = p + PMD_SIZE;
+				continue;
+			}
+		} else if (pmd_large(*pmd)) {
 			vmemmap_verify((pte_t *)pmd, node, addr, next);
+			continue;
+		}
+		pr_warn_once("vmemmap: falling back to regular page backing\n");
+		if (vmemmap_populate_basepages(addr, next, node))
+			return -ENOMEM;
 	}
 	return 0;
 }
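For context, the fallback target vmemmap_populate_basepages() is the generic helper in mm/sparse-vmemmap.c, which the parent commit gave its range-in-bytes signature. It walks the range one PAGE_SIZE step at a time, allocating each page-table level on demand. A sketch of its shape around this point in the tree (consult mm/sparse-vmemmap.c for the authoritative version):

	int __meminit vmemmap_populate_basepages(unsigned long start,
						 unsigned long end, int node)
	{
		unsigned long addr = start;
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		/* one 4k page per iteration; each level is populated if missing */
		for (; addr < end; addr += PAGE_SIZE) {
			pgd = vmemmap_pgd_populate(addr, node);
			if (!pgd)
				return -ENOMEM;
			pud = vmemmap_pud_populate(pgd, addr, node);
			if (!pud)
				return -ENOMEM;
			pmd = vmemmap_pmd_populate(pud, addr, node);
			if (!pmd)
				return -ENOMEM;
			pte = vmemmap_pte_populate(pmd, addr, node);
			if (!pte)
				return -ENOMEM;
		}

		return 0;
	}

Because these single-page allocations go through the regular page allocator rather than requiring a physically contiguous PMD_SIZE block, they can still succeed on a loaded, fragmented machine where the huge page attempt fails.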