diff options
author | Johannes Weiner <hannes@cmpxchg.org> | 2013-04-29 18:07:54 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-04-29 18:54:35 -0400 |
commit | e8216da5c719c3bfec12779b6faf456009f01c44 (patch) | |
tree | 51b6eec6b15388c9b4c2acf1fc95dfcbaf6b79f8 /arch/x86/mm | |
parent | 6c7a2ca4c11bcc00f8b4b840a4fca694d20e8905 (diff) |
x86-64: use vmemmap_populate_basepages() for !pse setups
We already have generic code to allocate the vmemmap with regular pages; use
it.
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Ben Hutchings <ben@decadent.org.uk>
Cc: Bernhard Schmidt <Bernhard.Schmidt@lrz.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: David Miller <davem@davemloft.net>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/x86/mm')
-rw-r--r-- | arch/x86/mm/init_64.c | 78 |
1 file changed, 38 insertions, 40 deletions
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 8c696c9cc6c1..9f6347c468b0 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c | |||
@@ -1281,7 +1281,8 @@ static long __meminitdata addr_start, addr_end; | |||
1281 | static void __meminitdata *p_start, *p_end; | 1281 | static void __meminitdata *p_start, *p_end; |
1282 | static int __meminitdata node_start; | 1282 | static int __meminitdata node_start; |
1283 | 1283 | ||
1284 | int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) | 1284 | static int __meminit vmemmap_populate_hugepages(unsigned long start, |
1285 | unsigned long end, int node) | ||
1285 | { | 1286 | { |
1286 | unsigned long addr; | 1287 | unsigned long addr; |
1287 | unsigned long next; | 1288 | unsigned long next; |
@@ -1290,7 +1291,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) | |||
1290 | pmd_t *pmd; | 1291 | pmd_t *pmd; |
1291 | 1292 | ||
1292 | for (addr = start; addr < end; addr = next) { | 1293 | for (addr = start; addr < end; addr = next) { |
1293 | void *p = NULL; | 1294 | next = pmd_addr_end(addr, end); |
1294 | 1295 | ||
1295 | pgd = vmemmap_pgd_populate(addr, node); | 1296 | pgd = vmemmap_pgd_populate(addr, node); |
1296 | if (!pgd) | 1297 | if (!pgd) |
@@ -1300,53 +1301,50 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) | |||
1300 | if (!pud) | 1301 | if (!pud) |
1301 | return -ENOMEM; | 1302 | return -ENOMEM; |
1302 | 1303 | ||
1303 | if (!cpu_has_pse) { | 1304 | pmd = pmd_offset(pud, addr); |
1304 | next = (addr + PAGE_SIZE) & PAGE_MASK; | 1305 | if (pmd_none(*pmd)) { |
1305 | pmd = vmemmap_pmd_populate(pud, addr, node); | 1306 | pte_t entry; |
1306 | 1307 | void *p; | |
1307 | if (!pmd) | ||
1308 | return -ENOMEM; | ||
1309 | |||
1310 | p = vmemmap_pte_populate(pmd, addr, node); | ||
1311 | 1308 | ||
1309 | p = vmemmap_alloc_block_buf(PMD_SIZE, node); | ||
1312 | if (!p) | 1310 | if (!p) |
1313 | return -ENOMEM; | 1311 | return -ENOMEM; |
1314 | } else { | ||
1315 | next = pmd_addr_end(addr, end); | ||
1316 | |||
1317 | pmd = pmd_offset(pud, addr); | ||
1318 | if (pmd_none(*pmd)) { | ||
1319 | pte_t entry; | ||
1320 | |||
1321 | p = vmemmap_alloc_block_buf(PMD_SIZE, node); | ||
1322 | if (!p) | ||
1323 | return -ENOMEM; | ||
1324 | |||
1325 | entry = pfn_pte(__pa(p) >> PAGE_SHIFT, | ||
1326 | PAGE_KERNEL_LARGE); | ||
1327 | set_pmd(pmd, __pmd(pte_val(entry))); | ||
1328 | |||
1329 | /* check to see if we have contiguous blocks */ | ||
1330 | if (p_end != p || node_start != node) { | ||
1331 | if (p_start) | ||
1332 | printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n", | ||
1333 | addr_start, addr_end-1, p_start, p_end-1, node_start); | ||
1334 | addr_start = addr; | ||
1335 | node_start = node; | ||
1336 | p_start = p; | ||
1337 | } | ||
1338 | 1312 | ||
1339 | addr_end = addr + PMD_SIZE; | 1313 | entry = pfn_pte(__pa(p) >> PAGE_SHIFT, |
1340 | p_end = p + PMD_SIZE; | 1314 | PAGE_KERNEL_LARGE); |
1341 | } else | 1315 | set_pmd(pmd, __pmd(pte_val(entry))); |
1342 | vmemmap_verify((pte_t *)pmd, node, addr, next); | 1316 | |
1343 | } | 1317 | /* check to see if we have contiguous blocks */ |
1318 | if (p_end != p || node_start != node) { | ||
1319 | if (p_start) | ||
1320 | printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n", | ||
1321 | addr_start, addr_end-1, p_start, p_end-1, node_start); | ||
1322 | addr_start = addr; | ||
1323 | node_start = node; | ||
1324 | p_start = p; | ||
1325 | } | ||
1344 | 1326 | ||
1327 | addr_end = addr + PMD_SIZE; | ||
1328 | p_end = p + PMD_SIZE; | ||
1329 | } else | ||
1330 | vmemmap_verify((pte_t *)pmd, node, addr, next); | ||
1345 | } | 1331 | } |
1346 | sync_global_pgds(start, end - 1); | ||
1347 | return 0; | 1332 | return 0; |
1348 | } | 1333 | } |
1349 | 1334 | ||
1335 | int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) | ||
1336 | { | ||
1337 | int err; | ||
1338 | |||
1339 | if (cpu_has_pse) | ||
1340 | err = vmemmap_populate_hugepages(start, end, node); | ||
1341 | else | ||
1342 | err = vmemmap_populate_basepages(start, end, node); | ||
1343 | if (!err) | ||
1344 | sync_global_pgds(start, end - 1); | ||
1345 | return err; | ||
1346 | } | ||
1347 | |||
1350 | #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE) | 1348 | #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE) |
1351 | void register_page_bootmem_memmap(unsigned long section_nr, | 1349 | void register_page_bootmem_memmap(unsigned long section_nr, |
1352 | struct page *start_page, unsigned long size) | 1350 | struct page *start_page, unsigned long size) |