author		Benjamin Herrenschmidt <benh@kernel.crashing.org>	2008-04-30 01:41:48 -0400
committer	Paul Mackerras <paulus@samba.org>	2008-05-15 06:49:25 -0400
commit		cec08e7a948326b01555be6311480aa08e637de2 (patch)
tree		a08e1d91c9a0c5eaa4e81036a3d7f992b2de1745 /arch/powerpc/mm/hash_utils_64.c
parent		08fcf1d61193d7b7779aa6d7388535e26e064a0b (diff)
[POWERPC] vmemmap fixes to use smaller pages
This changes vmemmap to use a different region (region 0xf) of the address
space, and to configure the page size of that region dynamically at boot.

The problem with the current approach of always using 16M pages is that
it's not well suited to machines that have small amounts of memory, such
as small partitions on pseries, or PS3's.

In fact, on the PS3, failure to allocate the 16M page backing vmemmap
tends to prevent hotplugging the HV's "additional" memory, thus limiting
the available memory even more, from my experience down to something
like 80M total, which makes it really not very usable.

The logic used by my patch to choose the vmemmap page size is:

 - If 16M pages are available and there's 1G or more RAM at boot,
   use that size.
 - Else if 64K pages are available, use that.
 - Else use 4K pages.

I've tested on a POWER6 (16M pages) and on an iSeries POWER3 (4K pages)
and it seems to work fine.

Note that I intend to change the way we organize the kernel regions &
SLBs, so the actual region will change from 0xf back to something else
at one point, as I simplify the SLB miss handler, but that will be for
a later patch.

Signed-off-by: Paul Mackerras <paulus@samba.org>
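As a quick illustration of the heuristic described above, here is a minimal standalone C sketch; pick_vmemmap_psize() and its stand-in inputs are hypothetical, while the real code (see the second hunk below) consults mmu_psize_defs[] and lmb_phys_mem_size():

#include <stdio.h>

enum { MMU_PAGE_4K, MMU_PAGE_64K, MMU_PAGE_16M };

static const char *psize_name[] = { "4K", "64K", "16M" };

/* Mirror of the patch's selection logic: prefer 16M pages only when the
 * MMU supports them and at least 1G (0x40000000 bytes) of RAM is present
 * at boot; otherwise fall back to 64K, then 4K. */
static int pick_vmemmap_psize(int have_16m, int have_64k,
			      unsigned long long phys_mem)
{
	if (have_16m && phys_mem >= 0x40000000ULL)
		return MMU_PAGE_16M;
	else if (have_64k)
		return MMU_PAGE_64K;
	return MMU_PAGE_4K;
}

int main(void)
{
	/* A PS3-like case: 16M pages supported but only 256M of RAM,
	 * so the heuristic picks 64K instead of 16M. */
	printf("vmemmap page size: %s\n",
	       psize_name[pick_vmemmap_psize(1, 1, 256ULL << 20)]);
	return 0;
}

Compiled and run, this prints "vmemmap page size: 64K", which is the point of the patch: small-memory machines no longer need a 16M contiguous allocation just to back vmemmap.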
Diffstat (limited to 'arch/powerpc/mm/hash_utils_64.c')
-rw-r--r--	arch/powerpc/mm/hash_utils_64.c	28
1 files changed, 26 insertions, 2 deletions
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 2b5a399f6fa6..0f2d239d94c4 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -94,6 +94,9 @@ unsigned long htab_hash_mask;
 int mmu_linear_psize = MMU_PAGE_4K;
 int mmu_virtual_psize = MMU_PAGE_4K;
 int mmu_vmalloc_psize = MMU_PAGE_4K;
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+int mmu_vmemmap_psize = MMU_PAGE_4K;
+#endif
 int mmu_io_psize = MMU_PAGE_4K;
 int mmu_kernel_ssize = MMU_SEGSIZE_256M;
 int mmu_highuser_ssize = MMU_SEGSIZE_256M;
@@ -387,11 +390,32 @@ static void __init htab_init_page_sizes(void)
 	}
 #endif /* CONFIG_PPC_64K_PAGES */
 
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+	/* We try to use 16M pages for vmemmap if that is supported
+	 * and we have at least 1G of RAM at boot
+	 */
+	if (mmu_psize_defs[MMU_PAGE_16M].shift &&
+	    lmb_phys_mem_size() >= 0x40000000)
+		mmu_vmemmap_psize = MMU_PAGE_16M;
+	else if (mmu_psize_defs[MMU_PAGE_64K].shift)
+		mmu_vmemmap_psize = MMU_PAGE_64K;
+	else
+		mmu_vmemmap_psize = MMU_PAGE_4K;
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+
 	printk(KERN_DEBUG "Page orders: linear mapping = %d, "
-	       "virtual = %d, io = %d\n",
+	       "virtual = %d, io = %d"
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+	       ", vmemmap = %d"
+#endif
+	       "\n",
 	       mmu_psize_defs[mmu_linear_psize].shift,
 	       mmu_psize_defs[mmu_virtual_psize].shift,
-	       mmu_psize_defs[mmu_io_psize].shift);
+	       mmu_psize_defs[mmu_io_psize].shift
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+	       ,mmu_psize_defs[mmu_vmemmap_psize].shift
+#endif
+	       );
 
 #ifdef CONFIG_HUGETLB_PAGE
 	/* Init large page size. Currently, we pick 16M or 1M depending
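The printk() change in the second hunk works because C concatenates adjacent string literals at compile time, so the #ifdef'd fragment simply splices ", vmemmap = %d" into the middle of a single format string. A minimal standalone illustration of the same trick (VERBOSE is a hypothetical switch, not from the patch):

#include <stdio.h>

#define VERBOSE 1

int main(void)
{
	/* "value = %d", the optional " (verbose)" fragment, and "\n" are
	 * merged into one literal by the compiler before printf sees them. */
	printf("value = %d"
#if VERBOSE
	       " (verbose)"
#endif
	       "\n", 42);
	return 0;
}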