commit     64658743fdd40021e3ac91e8ff260ad06578dd23
tree       da9bd83e54702156d66ae9b6d282d610376feda8  /arch/sparc64/mm/init.c
parent     4cfea5a7dfcc2766251e50ca30271a782d5004ad
author     David S. Miller <davem@davemloft.net>  2008-03-21 20:01:38 -0400
committer  David S. Miller <davem@davemloft.net>  2008-03-21 20:01:38 -0400
[SPARC64]: Remove most limitations to kernel image size.
Currently, kernel images are limited to 8MB in size, which causes
problems especially when enabling features that take up a lot of
kernel image space, such as lockdep.
The code now aligns the kernel image size up to a 4MB boundary and
maps one locked TLB entry per 4MB chunk. So the only practical
limitation is the number of available locked TLB entries, which is 16
on Cheetah and 64 on pre-Cheetah sparc64 cpus. Niagara cpus don't
actually have hardware locked TLB entry support. Rather, the
hypervisor transparently provides support for "locked" TLB entries,
since it runs with physical addressing and does the initial TLB miss
processing.
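As a rough standalone sketch (not part of the patch), the arithmetic
works out as follows; num_mappings() is a hypothetical helper that
mirrors the DIV_ROUND_UP(real_end - KERNBASE, 1 << 22) computation the
patch adds to paging_init():

    #include <stdio.h>

    #define MAPPING_SIZE (1UL << 22)  /* one 4MB locked TTE */

    /* Round an image size up to a whole number of 4MB mappings,
     * mirroring DIV_ROUND_UP(real_end - KERNBASE, 1 << 22) in the
     * patch below.
     */
    static unsigned long num_mappings(unsigned long image_bytes)
    {
            return (image_bytes + MAPPING_SIZE - 1) / MAPPING_SIZE;
    }

    int main(void)
    {
            /* A 14MB image rounds up to 4 mappings. */
            printf("14MB image -> %lu locked entries\n",
                   num_mappings(14UL << 20));
            return 0;
    }

With 4MB per locked entry, the practical ceilings therefore work out
to 64MB on Cheetah and 256MB on pre-Cheetah cpus.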
Fully utilizing this change requires some help from SILO, a patch for
which will be submitted to the maintainer. Essentially, SILO currently
maps only up to 8MB for the kernel image, and that needs to be
increased.
Note that neither this patch nor the SILO bits will help with network
booting. The openfirmware code will only map up to a certain amount
of the kernel image during a network boot, and there isn't much we can
do about that other than to implement a layered network booting
facility. Solaris has this and calls it "wanboot"; we may implement
something similar at some point.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc64/mm/init.c')
 arch/sparc64/mm/init.c | 38 ++++++++++++++------------------------
 1 file changed, 14 insertions(+), 24 deletions(-)
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index b5c30416fdac..466fd6cffac9 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -166,7 +166,7 @@ unsigned long sparc64_kern_pri_context __read_mostly;
 unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
 unsigned long sparc64_kern_sec_context __read_mostly;
 
-int bigkernel = 0;
+int num_kernel_image_mappings;
 
 #ifdef CONFIG_DEBUG_DCFLUSH
 atomic_t dcpage_flushes = ATOMIC_INIT(0);
@@ -572,7 +572,7 @@ static unsigned long kern_large_tte(unsigned long paddr);
 static void __init remap_kernel(void)
 {
 	unsigned long phys_page, tte_vaddr, tte_data;
-	int tlb_ent = sparc64_highest_locked_tlbent();
+	int i, tlb_ent = sparc64_highest_locked_tlbent();
 
 	tte_vaddr = (unsigned long) KERNBASE;
 	phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
@@ -582,27 +582,20 @@ static void __init remap_kernel(void)
 
 	/* Now lock us into the TLBs via Hypervisor or OBP. */
 	if (tlb_type == hypervisor) {
-		hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
-		hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
-		if (bigkernel) {
-			tte_vaddr += 0x400000;
-			tte_data += 0x400000;
+		for (i = 0; i < num_kernel_image_mappings; i++) {
 			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
 			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
+			tte_vaddr += 0x400000;
+			tte_data += 0x400000;
 		}
 	} else {
-		prom_dtlb_load(tlb_ent, tte_data, tte_vaddr);
-		prom_itlb_load(tlb_ent, tte_data, tte_vaddr);
-		if (bigkernel) {
-			tlb_ent -= 1;
-			prom_dtlb_load(tlb_ent,
-				       tte_data + 0x400000,
-				       tte_vaddr + 0x400000);
-			prom_itlb_load(tlb_ent,
-				       tte_data + 0x400000,
-				       tte_vaddr + 0x400000);
+		for (i = 0; i < num_kernel_image_mappings; i++) {
+			prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
+			prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
+			tte_vaddr += 0x400000;
+			tte_data += 0x400000;
 		}
-		sparc64_highest_unlocked_tlb_ent = tlb_ent - 1;
+		sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
 	}
 	if (tlb_type == cheetah_plus) {
 		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
@@ -1352,12 +1345,9 @@ void __init paging_init(void)
 	shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
 
 	real_end = (unsigned long)_end;
-	if ((real_end > ((unsigned long)KERNBASE + 0x400000)))
-		bigkernel = 1;
-	if ((real_end > ((unsigned long)KERNBASE + 0x800000))) {
-		prom_printf("paging_init: Kernel > 8MB, too large.\n");
-		prom_halt();
-	}
+	num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22);
+	printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
+	       num_kernel_image_mappings);
 
 	/* Set kernel pgd to upper alias so physical page computations
 	 * work.