| author | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2008-04-30 01:41:48 -0400 |
|---|---|---|
| committer | Paul Mackerras <paulus@samba.org> | 2008-05-15 06:49:25 -0400 |
| commit | cec08e7a948326b01555be6311480aa08e637de2 (patch) | |
| tree | a08e1d91c9a0c5eaa4e81036a3d7f992b2de1745 /arch/powerpc/mm/init_64.c | |
| parent | 08fcf1d61193d7b7779aa6d7388535e26e064a0b (diff) | |
[POWERPC] vmemmap fixes to use smaller pages
This changes vmemmap to use a different region (region 0xf) of the
address space, and to configure the page size of that region
dynamically at boot.
The problem with the current approach of always using 16M pages is that
it's not well suited to machines with small amounts of memory, such as
small pseries partitions or the PS3.
In fact, on the PS3, failure to allocate the 16M page backing vmemmap
tends to prevent hotplugging the HV's "additional" memory, limiting the
available memory even further; in my experience, down to something
like 80M total, which makes the machine not very usable.
The logic my patch uses to choose the vmemmap page size (sketched in the example after this list) is:
- If 16M pages are available and there's 1G or more RAM at boot,
use that size.
- Else if 64K pages are available, use that
- Else use 4K pages
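As a rough illustration, here is a minimal C sketch of that selection in the style of the hash MMU setup code. The diff below only shows mmu_vmemmap_psize and mmu_psize_defs being used; the MMU_PAGE_* indices, lmb_phys_mem_size(), and the exact hook point are my assumptions, not part of this diff.

```c
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* Prefer 16M pages for vmemmap when the CPU supports them and
	 * there is at least 1G of RAM at boot; otherwise fall back to
	 * 64K, then 4K.  Sketch only: lmb_phys_mem_size() and the
	 * MMU_PAGE_* constants are assumed, not shown in this diff.
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift &&
	    lmb_phys_mem_size() >= 0x40000000)
		mmu_vmemmap_psize = MMU_PAGE_16M;
	else if (mmu_psize_defs[MMU_PAGE_64K].shift)
		mmu_vmemmap_psize = MMU_PAGE_64K;
	else
		mmu_vmemmap_psize = MMU_PAGE_4K;
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
```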
I've tested on a POWER6 (16M pages) and on an iSeries POWER3 (4K pages)
and it seems to work fine.
Note that I intend to change the way we organize the kernel regions &
SLBs, so the region used will eventually change from 0xf to something
else as I simplify the SLB miss handler; that will be a later patch.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/mm/init_64.c')
| -rw-r--r-- | arch/powerpc/mm/init_64.c | 10 |
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index c5ac532a0161..6aa65375abf5 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -19,6 +19,8 @@
  *
  */
 
+#undef DEBUG
+
 #include <linux/signal.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
@@ -208,12 +210,12 @@ int __meminit vmemmap_populated(unsigned long start, int page_size)
 }
 
 int __meminit vmemmap_populate(struct page *start_page,
 			       unsigned long nr_pages, int node)
 {
 	unsigned long mode_rw;
 	unsigned long start = (unsigned long)start_page;
 	unsigned long end = (unsigned long)(start_page + nr_pages);
-	unsigned long page_size = 1 << mmu_psize_defs[mmu_linear_psize].shift;
+	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
 
 	mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;
 
@@ -235,11 +237,11 @@ int __meminit vmemmap_populate(struct page *start_page,
 		 start, p, __pa(p));
 
 		mapped = htab_bolt_mapping(start, start + page_size,
-					   __pa(p), mode_rw, mmu_linear_psize,
+					   __pa(p), mode_rw, mmu_vmemmap_psize,
 					   mmu_kernel_ssize);
 		BUG_ON(mapped < 0);
 	}
 
 	return 0;
 }
-#endif
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
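Since the diffstat above is limited to init_64.c, mmu_vmemmap_psize itself is declared elsewhere in the patch. A plausible shape for that side is sketched below; region 0xf comes from the commit message, but the MMU_PAGE_16M default, VMEMMAP_REGION_ID, and the REGION_SHIFT-based layout are assumptions, not confirmed by this diff.

```c
/* Sketch of the out-of-file pieces this diff relies on (assumed names):
 * the boot-time-chosen vmemmap page size, defaulting to 16M, and the
 * dedicated vmemmap region 0xf mentioned in the commit message.
 */
#ifdef CONFIG_SPARSEMEM_VMEMMAP
int mmu_vmemmap_psize = MMU_PAGE_16M;	/* overridden at boot, per the logic above */
#endif

#define VMEMMAP_REGION_ID	(0xfUL)				/* region 0xf */
#define VMEMMAP_BASE		(VMEMMAP_REGION_ID << REGION_SHIFT)
```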