author     Kumar Gala <galak@kernel.crashing.org>    2008-04-21 14:22:34 -0400
committer  Paul Mackerras <paulus@samba.org>         2008-04-24 06:58:01 -0400
commit     37dd2badcfcec35f5e21a0926968d77a404f03c3 (patch)
tree       d659c5713a15b4b70e3f49cbe58c9dfeb0e7c117 /arch
parent     96f1bb8a412aec3fc16306ef07c5bdb426edb615 (diff)
[POWERPC] 85xx: Add support for relocatable kernel (and booting at non-zero)
Added support to allow an 85xx kernel to be run from a non-zero physical address (useful for cooperative asymmetric multiprocessing situations and kdump). The support can be configured at compile time by setting CONFIG_PAGE_OFFSET, CONFIG_KERNEL_START, and CONFIG_PHYSICAL_START as desired.

Alternatively, the kernel build can set CONFIG_RELOCATABLE. Setting this config option causes the kernel to determine at runtime the physical addresses of CONFIG_PAGE_OFFSET and CONFIG_KERNEL_START. If CONFIG_RELOCATABLE is set, then CONFIG_PHYSICAL_START has no meaning. However, CONFIG_PHYSICAL_START will always be used to set the LOAD program header physical address field in the resulting ELF image.

Currently we are limited to running at a physical address that is a multiple of 256M. This is due to how we map TLBs to cover lowmem. This should be fixed to allow 64M or maybe even 16M alignment in the future. It is considered an error to try to run a kernel at a non-aligned physical address.

All the magic for this support is accomplished by proper initialization of the kernel memory subsystem and use of ARCH_PFN_OFFSET.

The use of ARCH_PFN_OFFSET only affects normal memory and not IO mappings. ioremap uses map_page and isn't affected by ARCH_PFN_OFFSET.

/dev/mem continues to allow access to any physical address in the system regardless of how CONFIG_PHYSICAL_START is set.

Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
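[Editorial sketch] The address arithmetic behind the paragraphs above can be seen in a small standalone program (ordinary userspace C, not kernel code and not part of this patch). The macro shapes mirror the powerpc headers of this period (MEMORY_START, ARCH_PFN_OFFSET, and the linear offset between KERNELBASE and the load address); the 0x10000000 load address, the helper names, and the exact definitions are illustrative assumptions only:

/* Standalone illustration of how a non-zero physical start feeds the pfn
 * accounting.  All values and macro forms are assumptions for the example,
 * not definitions taken from this patch. */
#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_OFFSET     0xc0000000UL    /* CONFIG_PAGE_OFFSET              */
#define KERNELBASE      0xc0000000UL    /* CONFIG_KERNEL_START             */
#define PHYSICAL_START  0x10000000UL    /* 256M-aligned physical load addr */

/* Physical address that PAGE_OFFSET maps to (compile-time case); with
 * CONFIG_RELOCATABLE this would come from memstart_addr at runtime instead. */
#define MEMORY_START    (PHYSICAL_START + PAGE_OFFSET - KERNELBASE)

/* First valid pfn of lowmem; FLATMEM indexes mem_map[] relative to this. */
#define ARCH_PFN_OFFSET (MEMORY_START >> PAGE_SHIFT)

/* Linear lowmem translation: offset between KERNELBASE and the load address. */
static unsigned long virt_to_phys_lin(unsigned long va)
{
	return va - KERNELBASE + PHYSICAL_START;
}

int main(void)
{
	unsigned long va = 0xc0001000UL;        /* hypothetical lowmem address */
	unsigned long pa = virt_to_phys_lin(va);
	unsigned long pfn = pa >> PAGE_SHIFT;

	printf("PHYSICAL_START  = 0x%08lx\n", (unsigned long)PHYSICAL_START);
	printf("ARCH_PFN_OFFSET = 0x%05lx\n", (unsigned long)ARCH_PFN_OFFSET);
	printf("va 0x%08lx -> pa 0x%08lx, pfn 0x%05lx, mem_map index %lu\n",
	       va, pa, pfn, pfn - ARCH_PFN_OFFSET);
	return 0;
}

As the commit message notes, only normal memory is subject to ARCH_PFN_OFFSET; ioremap() goes through map_page() and /dev/mem access is unaffected.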
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/Kconfig                  | 69
-rw-r--r--  arch/powerpc/kernel/head_fsl_booke.S  | 11
-rw-r--r--  arch/powerpc/kernel/prom.c            |  4
-rw-r--r--  arch/powerpc/kernel/setup_64.c        |  2
-rw-r--r--  arch/powerpc/mm/fsl_booke_mmu.c       |  2
-rw-r--r--  arch/powerpc/mm/init_32.c             |  5
-rw-r--r--  arch/powerpc/mm/init_64.c             |  3
-rw-r--r--  arch/powerpc/mm/mem.c                 |  5
8 files changed, 92 insertions, 9 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 4bb2e9310a56..fdc755a05f70 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -656,21 +656,76 @@ config LOWMEM_SIZE
 	hex "Maximum low memory size (in bytes)" if LOWMEM_SIZE_BOOL
 	default "0x30000000"
 
+config RELOCATABLE
+	bool "Build a relocatable kernel (EXPERIMENTAL)"
+	depends on EXPERIMENTAL && ADVANCED_OPTIONS && FLATMEM && FSL_BOOKE
+	help
+	  This builds a kernel image that is capable of running at the
+	  location the kernel is loaded at (some alignment restrictions may
+	  exist).
+
+	  One use is for the kexec on panic case where the recovery kernel
+	  must live at a different physical address than the primary
+	  kernel.
+
+	  Note: If CONFIG_RELOCATABLE=y, then the kernel runs from the address
+	  it has been loaded at and the compile time physical addresses
+	  CONFIG_PHYSICAL_START is ignored. However CONFIG_PHYSICAL_START
+	  setting can still be useful to bootwrappers that need to know the
+	  load location of the kernel (eg. u-boot/mkimage).
+
+config PAGE_OFFSET_BOOL
+	bool "Set custom page offset address"
+	depends on ADVANCED_OPTIONS
+	help
+	  This option allows you to set the kernel virtual address at which
+	  the kernel will map low memory. This can be useful in optimizing
+	  the virtual memory layout of the system.
+
+	  Say N here unless you know what you are doing.
+
+config PAGE_OFFSET
+	hex "Virtual address of memory base" if PAGE_OFFSET_BOOL
+	default "0xc0000000"
+
 config KERNEL_START_BOOL
 	bool "Set custom kernel base address"
 	depends on ADVANCED_OPTIONS
 	help
 	  This option allows you to set the kernel virtual address at which
-	  the kernel will map low memory (the kernel image will be linked at
-	  this address). This can be useful in optimizing the virtual memory
-	  layout of the system.
+	  the kernel will be loaded. Normally this should match PAGE_OFFSET
+	  however there are times (like kdump) that one might not want them
+	  to be the same.
 
 	  Say N here unless you know what you are doing.
 
 config KERNEL_START
 	hex "Virtual address of kernel base" if KERNEL_START_BOOL
+	default PAGE_OFFSET if PAGE_OFFSET_BOOL
+	default "0xc2000000" if CRASH_DUMP
 	default "0xc0000000"
 
+config PHYSICAL_START_BOOL
+	bool "Set physical address where the kernel is loaded"
+	depends on ADVANCED_OPTIONS && FLATMEM && FSL_BOOKE
+	help
+	  This gives the physical address where the kernel is loaded.
+
+	  Say N here unless you know what you are doing.
+
+config PHYSICAL_START
+	hex "Physical address where the kernel is loaded" if PHYSICAL_START_BOOL
+	default "0x02000000" if PPC_STD_MMU && CRASH_DUMP
+	default "0x00000000"
+
+config PHYSICAL_ALIGN
+	hex
+	default "0x10000000" if FSL_BOOKE
+	help
+	  This value puts the alignment restrictions on physical address
+	  where kernel is loaded and run from. Kernel is compiled for an
+	  address which meets above alignment restriction.
+
 config TASK_SIZE_BOOL
 	bool "Set custom user task size"
 	depends on ADVANCED_OPTIONS
@@ -717,9 +772,17 @@ config PIN_TLB
 endmenu
 
 if PPC64
+config PAGE_OFFSET
+	hex
+	default "0xc000000000000000"
 config KERNEL_START
 	hex
+	default "0xc000000002000000" if CRASH_DUMP
 	default "0xc000000000000000"
+config PHYSICAL_START
+	hex
+	default "0x02000000" if CRASH_DUMP
+	default "0x00000000"
 endif
 
 source "net/Kconfig"
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 4ff744143566..e581524d85bc 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -371,6 +371,17 @@ skpinv: addi r6,r6,1 /* Increment */
 
 	bl	early_init
 
+#ifdef CONFIG_RELOCATABLE
+	lis	r3,kernstart_addr@ha
+	la	r3,kernstart_addr@l(r3)
+#ifdef CONFIG_PHYS_64BIT
+	stw	r23,0(r3)
+	stw	r25,4(r3)
+#else
+	stw	r25,0(r3)
+#endif
+#endif
+
 	mfspr	r3,SPRN_TLB1CFG
 	andi.	r3,r3,0xfff
 	lis	r4,num_tlbcam_entries@ha
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 3bfe7837e820..2aefe2a4129a 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -53,6 +53,7 @@
 #include <asm/pci-bridge.h>
 #include <asm/phyp_dump.h>
 #include <asm/kexec.h>
+#include <mm/mmu_decl.h>
 
 #ifdef DEBUG
 #define DBG(fmt...) printk(KERN_ERR fmt)
@@ -978,7 +979,10 @@ static int __init early_init_dt_scan_memory(unsigned long node,
 		}
 #endif
 		lmb_add(base, size);
+
+		memstart_addr = min((u64)memstart_addr, base);
 	}
+
 	return 0;
 }
 
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 31ada9fdfc5c..153a48dc8f40 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -435,7 +435,7 @@ void __init setup_system(void)
 	printk("htab_address = 0x%p\n", htab_address);
 	printk("htab_hash_mask = 0x%lx\n", htab_hash_mask);
 #if PHYSICAL_START > 0
-	printk("physical_start = 0x%x\n", PHYSICAL_START);
+	printk("physical_start = 0x%lx\n", PHYSICAL_START);
 #endif
 	printk("-----------------------------------------------------\n");
 
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index ada249bf9779..ce10e2b1b902 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -202,7 +202,7 @@ adjust_total_lowmem(void)
 	cam_max_size = max_lowmem_size;
 
 	/* adjust lowmem size to max_lowmem_size */
-	ram = min(max_lowmem_size, total_lowmem);
+	ram = min(max_lowmem_size, (phys_addr_t)total_lowmem);
 
 	/* Calculate CAM values */
 	__cam0 = 1UL << 2 * (__ilog2(ram) / 2);
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 47325f23c51f..578750e4ca88 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -59,7 +59,10 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 unsigned long total_memory;
 unsigned long total_lowmem;
 
-phys_addr_t memstart_addr;
+phys_addr_t memstart_addr = (phys_addr_t)~0ull;
+EXPORT_SYMBOL(memstart_addr);
+phys_addr_t kernstart_addr;
+EXPORT_SYMBOL(kernstart_addr);
 phys_addr_t lowmem_end_addr;
 
 int boot_mapsize;
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 698bd000f98b..c5ac532a0161 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -72,7 +72,8 @@
 #warning TASK_SIZE is smaller than it needs to be.
 #endif
 
-phys_addr_t memstart_addr;
+phys_addr_t memstart_addr = ~0;
+phys_addr_t kernstart_addr;
 
 void free_initmem(void)
 {
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 16def4dcff6d..0062e6b1c555 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -216,7 +216,7 @@ void __init do_init_bootmem(void)
 	unsigned long total_pages;
 	int boot_mapsize;
 
-	max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
+	max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
 	total_pages = (lmb_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
 #ifdef CONFIG_HIGHMEM
 	total_pages = total_lowmem >> PAGE_SHIFT;
@@ -232,7 +232,8 @@ void __init do_init_bootmem(void)
 
 	start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
 
-	boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
+	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
+	boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn);
 
 	/* Add active regions with valid PFNs */
 	for (i = 0; i < lmb.memory.cnt; i++) {