-rw-r--r--  arch/powerpc/Kconfig                   |  69
-rw-r--r--  arch/powerpc/kernel/head_fsl_booke.S   |  11
-rw-r--r--  arch/powerpc/kernel/prom.c             |   4
-rw-r--r--  arch/powerpc/kernel/setup_64.c         |   2
-rw-r--r--  arch/powerpc/mm/fsl_booke_mmu.c        |   2
-rw-r--r--  arch/powerpc/mm/init_32.c              |   5
-rw-r--r--  arch/powerpc/mm/init_64.c              |   3
-rw-r--r--  arch/powerpc/mm/mem.c                  |   5
-rw-r--r--  include/asm-powerpc/kdump.h            |   5
-rw-r--r--  include/asm-powerpc/page.h             |  45
-rw-r--r--  include/asm-powerpc/page_32.h          |   6
11 files changed, 135 insertions, 22 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 4bb2e9310a56..fdc755a05f70 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -656,21 +656,76 @@ config LOWMEM_SIZE
656 hex "Maximum low memory size (in bytes)" if LOWMEM_SIZE_BOOL 656 hex "Maximum low memory size (in bytes)" if LOWMEM_SIZE_BOOL
657 default "0x30000000" 657 default "0x30000000"
658 658
659config RELOCATABLE
660 bool "Build a relocatable kernel (EXPERIMENTAL)"
661 depends on EXPERIMENTAL && ADVANCED_OPTIONS && FLATMEM && FSL_BOOKE
662 help
663 This builds a kernel image that is capable of running at the
664 location the kernel is loaded at (some alignment restrictions may
665 exist).
666
667 One use is for the kexec on panic case where the recovery kernel
668 must live at a different physical address than the primary
669 kernel.
670
671 Note: If CONFIG_RELOCATABLE=y, then the kernel runs from the address
672 it has been loaded at and the compile time physical addresses
673 CONFIG_PHYSICAL_START is ignored. However CONFIG_PHYSICAL_START
674 setting can still be useful to bootwrappers that need to know the
675 load location of the kernel (eg. u-boot/mkimage).
676
677config PAGE_OFFSET_BOOL
678 bool "Set custom page offset address"
679 depends on ADVANCED_OPTIONS
680 help
681 This option allows you to set the kernel virtual address at which
682 the kernel will map low memory. This can be useful in optimizing
683 the virtual memory layout of the system.
684
685 Say N here unless you know what you are doing.
686
687config PAGE_OFFSET
688 hex "Virtual address of memory base" if PAGE_OFFSET_BOOL
689 default "0xc0000000"
690
659config KERNEL_START_BOOL 691config KERNEL_START_BOOL
660 bool "Set custom kernel base address" 692 bool "Set custom kernel base address"
661 depends on ADVANCED_OPTIONS 693 depends on ADVANCED_OPTIONS
662 help 694 help
663 This option allows you to set the kernel virtual address at which 695 This option allows you to set the kernel virtual address at which
664 the kernel will map low memory (the kernel image will be linked at 696 the kernel will be loaded. Normally this should match PAGE_OFFSET
665 this address). This can be useful in optimizing the virtual memory 697 however there are times (like kdump) that one might not want them
666 layout of the system. 698 to be the same.
667 699
668 Say N here unless you know what you are doing. 700 Say N here unless you know what you are doing.
669 701
670config KERNEL_START 702config KERNEL_START
671 hex "Virtual address of kernel base" if KERNEL_START_BOOL 703 hex "Virtual address of kernel base" if KERNEL_START_BOOL
704 default PAGE_OFFSET if PAGE_OFFSET_BOOL
705 default "0xc2000000" if CRASH_DUMP
672 default "0xc0000000" 706 default "0xc0000000"
673 707
708config PHYSICAL_START_BOOL
709 bool "Set physical address where the kernel is loaded"
710 depends on ADVANCED_OPTIONS && FLATMEM && FSL_BOOKE
711 help
712 This gives the physical address where the kernel is loaded.
713
714 Say N here unless you know what you are doing.
715
716config PHYSICAL_START
717 hex "Physical address where the kernel is loaded" if PHYSICAL_START_BOOL
718 default "0x02000000" if PPC_STD_MMU && CRASH_DUMP
719 default "0x00000000"
720
721config PHYSICAL_ALIGN
722 hex
723 default "0x10000000" if FSL_BOOKE
724 help
725 This value puts the alignment restrictions on physical address
726 where kernel is loaded and run from. Kernel is compiled for an
727 address which meets above alignment restriction.
728
674config TASK_SIZE_BOOL 729config TASK_SIZE_BOOL
675 bool "Set custom user task size" 730 bool "Set custom user task size"
676 depends on ADVANCED_OPTIONS 731 depends on ADVANCED_OPTIONS
@@ -717,9 +772,17 @@ config PIN_TLB
 endmenu
 
 if PPC64
+config PAGE_OFFSET
+	hex
+	default "0xc000000000000000"
 config KERNEL_START
 	hex
+	default "0xc000000002000000" if CRASH_DUMP
 	default "0xc000000000000000"
+config PHYSICAL_START
+	hex
+	default "0x02000000" if CRASH_DUMP
+	default "0x00000000"
 endif
 
 source "net/Kconfig"
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 4ff744143566..e581524d85bc 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -371,6 +371,17 @@ skpinv: addi r6,r6,1 /* Increment */
 
 	bl	early_init
 
+#ifdef CONFIG_RELOCATABLE
+	lis	r3,kernstart_addr@ha
+	la	r3,kernstart_addr@l(r3)
+#ifdef CONFIG_PHYS_64BIT
+	stw	r23,0(r3)
+	stw	r25,4(r3)
+#else
+	stw	r25,0(r3)
+#endif
+#endif
+
 	mfspr	r3,SPRN_TLB1CFG
 	andi.	r3,r3,0xfff
 	lis	r4,num_tlbcam_entries@ha
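For readers not fluent in BookE assembly, the stores above publish the kernel's physical load address to C code. A rough C equivalent is sketched below; it assumes (as this hunk itself relies on but does not show) that r25 holds the low 32 bits and r23 the high 32 bits of the address the kernel was actually loaded at:

/* Sketch only, not part of the patch: what the stores above amount to. */
#include <linux/types.h>

extern phys_addr_t kernstart_addr;

static void record_kernstart(u32 load_addr_high, u32 load_addr_low)
{
#ifdef CONFIG_PHYS_64BIT
	/* two word stores, high word at offset 0: big-endian 64-bit layout */
	kernstart_addr = ((u64)load_addr_high << 32) | load_addr_low;
#else
	kernstart_addr = load_addr_low;
#endif
}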
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 3bfe7837e820..2aefe2a4129a 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -53,6 +53,7 @@
 #include <asm/pci-bridge.h>
 #include <asm/phyp_dump.h>
 #include <asm/kexec.h>
+#include <mm/mmu_decl.h>
 
 #ifdef DEBUG
 #define DBG(fmt...) printk(KERN_ERR fmt)
@@ -978,7 +979,10 @@ static int __init early_init_dt_scan_memory(unsigned long node,
 		}
 #endif
 		lmb_add(base, size);
+
+		memstart_addr = min((u64)memstart_addr, base);
 	}
+
 	return 0;
 }
 
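The new min() line runs once per memory region described by the flat device tree. Because the mm/init_32.c and mm/init_64.c hunks below start memstart_addr at all-ones, the net effect is that memstart_addr ends up as the lowest physical base of any RAM region. A tiny illustration with made-up values:

#include <stdio.h>

int main(void)
{
	/* hypothetical bases of three memory@... nodes, in any order */
	unsigned long long bases[] = { 0x20000000ull, 0x00000000ull, 0x40000000ull };
	unsigned long long memstart_addr = ~0ull; /* as initialised in init_32.c/init_64.c */

	for (int i = 0; i < 3; i++)
		if (bases[i] < memstart_addr)
			memstart_addr = bases[i];

	printf("memstart_addr = 0x%llx\n", memstart_addr); /* 0x0: the lowest base wins */
	return 0;
}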
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 31ada9fdfc5c..153a48dc8f40 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -435,7 +435,7 @@ void __init setup_system(void)
435 printk("htab_address = 0x%p\n", htab_address); 435 printk("htab_address = 0x%p\n", htab_address);
436 printk("htab_hash_mask = 0x%lx\n", htab_hash_mask); 436 printk("htab_hash_mask = 0x%lx\n", htab_hash_mask);
437#if PHYSICAL_START > 0 437#if PHYSICAL_START > 0
438 printk("physical_start = 0x%x\n", PHYSICAL_START); 438 printk("physical_start = 0x%lx\n", PHYSICAL_START);
439#endif 439#endif
440 printk("-----------------------------------------------------\n"); 440 printk("-----------------------------------------------------\n");
441 441
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index ada249bf9779..ce10e2b1b902 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -202,7 +202,7 @@ adjust_total_lowmem(void)
 		cam_max_size = max_lowmem_size;
 
 	/* adjust lowmem size to max_lowmem_size */
-	ram = min(max_lowmem_size, total_lowmem);
+	ram = min(max_lowmem_size, (phys_addr_t)total_lowmem);
 
 	/* Calculate CAM values */
 	__cam0 = 1UL << 2 * (__ilog2(ram) / 2);
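For context on the surrounding code: the CAM entries used to pin lowmem must each be a power of 4 in size, which is what the 2 * (__ilog2(ram) / 2) rounding achieves. A standalone sketch with made-up numbers, with __ilog2 replaced by a plain loop for illustration:

#include <stdio.h>

static unsigned int ilog2_u(unsigned long v) /* stand-in for the kernel's __ilog2() */
{
	unsigned int l = 0;
	while (v >>= 1)
		l++;
	return l;
}

int main(void)
{
	unsigned long ram  = 192ul << 20;                  /* 192MB of lowmem */
	unsigned long cam0 = 1UL << 2 * (ilog2_u(ram) / 2);

	printf("cam0 = %luMB\n", cam0 >> 20);              /* 64MB            */
	return 0;
}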
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 47325f23c51f..578750e4ca88 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -59,7 +59,10 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 unsigned long total_memory;
 unsigned long total_lowmem;
 
-phys_addr_t memstart_addr;
+phys_addr_t memstart_addr = (phys_addr_t)~0ull;
+EXPORT_SYMBOL(memstart_addr);
+phys_addr_t kernstart_addr;
+EXPORT_SYMBOL(kernstart_addr);
 phys_addr_t lowmem_end_addr;
 
 int boot_mapsize;
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 698bd000f98b..c5ac532a0161 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -72,7 +72,8 @@
 #warning TASK_SIZE is smaller than it needs to be.
 #endif
 
-phys_addr_t memstart_addr;
+phys_addr_t memstart_addr = ~0;
+phys_addr_t kernstart_addr;
 
 void free_initmem(void)
 {
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 16def4dcff6d..0062e6b1c555 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -216,7 +216,7 @@ void __init do_init_bootmem(void)
 	unsigned long total_pages;
 	int boot_mapsize;
 
-	max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
+	max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
 	total_pages = (lmb_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
 #ifdef CONFIG_HIGHMEM
 	total_pages = total_lowmem >> PAGE_SHIFT;
@@ -232,7 +232,8 @@ void __init do_init_bootmem(void)
 
 	start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
 
-	boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
+	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
+	boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn);
 
 	/* Add active regions with valid PFNs */
 	for (i = 0; i < lmb.memory.cnt; i++) {
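The switch to init_bootmem_node() matters once MEMORY_START can be non-zero: bootmem must then start at MEMORY_START's page frame rather than at pfn 0. A small arithmetic sketch using hypothetical values (a board whose RAM happens to start at 0x20000000 and end at 0x30000000):

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long long memory_start = 0x20000000ull; /* MEMORY_START (example)      */
	unsigned long long dram_end     = 0x30000000ull; /* lmb_end_of_DRAM() (example) */

	unsigned long min_low_pfn = memory_start >> PAGE_SHIFT;
	unsigned long max_low_pfn = dram_end >> PAGE_SHIFT;

	/* bootmem manages [131072, 196608) instead of starting at pfn 0 */
	printf("bootmem pfns: [%lu, %lu)\n", min_low_pfn, max_low_pfn);
	return 0;
}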
diff --git a/include/asm-powerpc/kdump.h b/include/asm-powerpc/kdump.h
index 10e8eb1e6f4f..f6c93c716898 100644
--- a/include/asm-powerpc/kdump.h
+++ b/include/asm-powerpc/kdump.h
@@ -11,16 +11,11 @@
 
 #ifdef CONFIG_CRASH_DUMP
 
-#define PHYSICAL_START	KDUMP_KERNELBASE
 #define KDUMP_TRAMPOLINE_START	0x0100
 #define KDUMP_TRAMPOLINE_END	0x3000
 
 #define KDUMP_MIN_TCE_ENTRIES	2048
 
-#else /* !CONFIG_CRASH_DUMP */
-
-#define PHYSICAL_START	0x0
-
 #endif /* CONFIG_CRASH_DUMP */
 
 #ifndef __ASSEMBLY__
diff --git a/include/asm-powerpc/page.h b/include/asm-powerpc/page.h
index 6c850609b847..cffdf0eb0df6 100644
--- a/include/asm-powerpc/page.h
+++ b/include/asm-powerpc/page.h
@@ -12,6 +12,7 @@
 
 #include <asm/asm-compat.h>
 #include <asm/kdump.h>
+#include <asm/types.h>
 
 /*
  * On PPC32 page size is 4K. For PPC64 we support either 4K or 64K software
@@ -42,8 +43,23 @@
  *
  * The kdump dump kernel is one example where KERNELBASE != PAGE_OFFSET.
  *
- * To get a physical address from a virtual one you subtract PAGE_OFFSET,
- * _not_ KERNELBASE.
+ * PAGE_OFFSET is the virtual address of the start of lowmem.
+ *
+ * PHYSICAL_START is the physical address of the start of the kernel.
+ *
+ * MEMORY_START is the physical address of the start of lowmem.
+ *
+ * KERNELBASE, PAGE_OFFSET, and PHYSICAL_START are all configurable on
+ * ppc32 and based on how they are set we determine MEMORY_START.
+ *
+ * For the linear mapping the following equation should be true:
+ * KERNELBASE - PAGE_OFFSET = PHYSICAL_START - MEMORY_START
+ *
+ * Also, KERNELBASE >= PAGE_OFFSET and PHYSICAL_START >= MEMORY_START
+ *
+ * There are two ways to determine a physical address from a virtual one:
+ * va = pa + PAGE_OFFSET - MEMORY_START
+ * va = pa + KERNELBASE - PHYSICAL_START
  *
  * If you want to know something's offset from the start of the kernel you
  * should subtract KERNELBASE.
@@ -51,20 +67,33 @@
  * If you want to test if something's a kernel address, use is_kernel_addr().
  */
 
-#define PAGE_OFFSET	ASM_CONST(CONFIG_KERNEL_START)
-#define KERNELBASE	(PAGE_OFFSET + PHYSICAL_START)
-#define LOAD_OFFSET	PAGE_OFFSET
+#define KERNELBASE	ASM_CONST(CONFIG_KERNEL_START)
+#define PAGE_OFFSET	ASM_CONST(CONFIG_PAGE_OFFSET)
+#define LOAD_OFFSET	ASM_CONST((CONFIG_KERNEL_START-CONFIG_PHYSICAL_START))
+
+#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_FLATMEM)
+#ifndef __ASSEMBLY__
+extern phys_addr_t memstart_addr;
+extern phys_addr_t kernstart_addr;
+#endif
+#define PHYSICAL_START	kernstart_addr
+#define MEMORY_START	memstart_addr
+#else
+#define PHYSICAL_START	ASM_CONST(CONFIG_PHYSICAL_START)
+#define MEMORY_START	(PHYSICAL_START + PAGE_OFFSET - KERNELBASE)
+#endif
 
 #ifdef CONFIG_FLATMEM
-#define pfn_valid(pfn)		((pfn) < max_mapnr)
+#define ARCH_PFN_OFFSET		(MEMORY_START >> PAGE_SHIFT)
+#define pfn_valid(pfn)		((pfn) >= ARCH_PFN_OFFSET && (pfn) < (ARCH_PFN_OFFSET + max_mapnr))
 #endif
 
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
 #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
 #define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
 
-#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET))
-#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)
+#define __va(x) ((void *)((unsigned long)(x) - PHYSICAL_START + KERNELBASE))
+#define __pa(x) ((unsigned long)(x) + PHYSICAL_START - KERNELBASE)
 
 /*
  * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
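Since __va()/__pa() no longer pivot on PAGE_OFFSET alone, a quick userspace check of the new arithmetic may help. The sketch below is not part of the patch; it just plugs in the ppc32 CRASH_DUMP defaults from the Kconfig hunk above (KERNELBASE 0xc2000000, PHYSICAL_START 0x02000000) and round-trips one hypothetical address:

#include <assert.h>
#include <stdio.h>

#define KERNELBASE      0xc2000000ul
#define PHYSICAL_START  0x02000000ul

/* same arithmetic as the new kernel macros, minus the void * cast */
#define __va(x) ((unsigned long)(x) - PHYSICAL_START + KERNELBASE)
#define __pa(x) ((unsigned long)(x) + PHYSICAL_START - KERNELBASE)

int main(void)
{
	unsigned long pa = 0x03400000ul; /* a page inside the kdump kernel's region */
	unsigned long va = __va(pa);

	printf("pa 0x%08lx -> va 0x%08lx\n", pa, va); /* va 0xc3400000 */
	assert(__pa(va) == pa);                       /* round-trips   */
	return 0;
}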
diff --git a/include/asm-powerpc/page_32.h b/include/asm-powerpc/page_32.h
index 51f8134b5939..ebfae530a379 100644
--- a/include/asm-powerpc/page_32.h
+++ b/include/asm-powerpc/page_32.h
@@ -1,6 +1,12 @@
 #ifndef _ASM_POWERPC_PAGE_32_H
 #define _ASM_POWERPC_PAGE_32_H
 
+#if defined(CONFIG_PHYSICAL_ALIGN) && (CONFIG_PHYSICAL_START != 0)
+#if (CONFIG_PHYSICAL_START % CONFIG_PHYSICAL_ALIGN) != 0
+#error "CONFIG_PHYSICAL_START must be a multiple of CONFIG_PHYSICAL_ALIGN"
+#endif
+#endif
+
 #define VM_DATA_DEFAULT_FLAGS	VM_DATA_DEFAULT_FLAGS32
 
 #ifdef CONFIG_NOT_COHERENT_CACHE