diff options
-rw-r--r-- | arch/powerpc/Kconfig | 2 | ||||
-rw-r--r-- | arch/powerpc/kernel/fsl_booke_entry_mapping.S | 2 | ||||
-rw-r--r-- | arch/powerpc/kernel/head_fsl_booke.S | 34 | ||||
-rw-r--r-- | arch/powerpc/mm/fsl_booke_mmu.c | 28 |
4 files changed, 65 insertions, 1 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index f0a893142cee..4bb52e6488ea 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig | |||
@@ -884,7 +884,7 @@ config DYNAMIC_MEMSTART | |||
884 | 884 | ||
885 | config RELOCATABLE | 885 | config RELOCATABLE |
886 | bool "Build a relocatable kernel" | 886 | bool "Build a relocatable kernel" |
887 | depends on ADVANCED_OPTIONS && FLATMEM && 44x | 887 | depends on ADVANCED_OPTIONS && FLATMEM && (44x || FSL_BOOKE) |
888 | select NONSTATIC_KERNEL | 888 | select NONSTATIC_KERNEL |
889 | help | 889 | help |
890 | This builds a kernel image that is capable of running at the | 890 | This builds a kernel image that is capable of running at the |
diff --git a/arch/powerpc/kernel/fsl_booke_entry_mapping.S b/arch/powerpc/kernel/fsl_booke_entry_mapping.S index a92c79be2728..f22e7e44fbf3 100644 --- a/arch/powerpc/kernel/fsl_booke_entry_mapping.S +++ b/arch/powerpc/kernel/fsl_booke_entry_mapping.S | |||
@@ -176,6 +176,8 @@ skpinv: addi r6,r6,1 /* Increment */ | |||
176 | /* 7. Jump to KERNELBASE mapping */ | 176 | /* 7. Jump to KERNELBASE mapping */ |
177 | lis r6,(KERNELBASE & ~0xfff)@h | 177 | lis r6,(KERNELBASE & ~0xfff)@h |
178 | ori r6,r6,(KERNELBASE & ~0xfff)@l | 178 | ori r6,r6,(KERNELBASE & ~0xfff)@l |
179 | rlwinm r7,r25,0,0x03ffffff | ||
180 | add r6,r7,r6 | ||
179 | 181 | ||
180 | #elif defined(ENTRY_MAPPING_KEXEC_SETUP) | 182 | #elif defined(ENTRY_MAPPING_KEXEC_SETUP) |
181 | /* | 183 | /* |
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S index 196950f29c00..19bd574bda9d 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ b/arch/powerpc/kernel/head_fsl_booke.S | |||
@@ -73,6 +73,30 @@ _ENTRY(_start); | |||
73 | li r24,0 /* CPU number */ | 73 | li r24,0 /* CPU number */ |
74 | li r23,0 /* phys kernel start (high) */ | 74 | li r23,0 /* phys kernel start (high) */ |
75 | 75 | ||
76 | #ifdef CONFIG_RELOCATABLE | ||
77 | LOAD_REG_ADDR_PIC(r3, _stext) /* Get our current runtime base */ | ||
78 | |||
79 | /* Translate _stext address to physical, save in r23/r25 */ | ||
80 | bl get_phys_addr | ||
81 | mr r23,r3 | ||
82 | mr r25,r4 | ||
83 | |||
84 | /* | ||
85 | * We have the runtime (virtual) address of our base. | ||
86 | * We calculate our shift of offset from a 64M page. | ||
87 | * We could map the 64M page we belong to at PAGE_OFFSET and | ||
88 | * get going from there. | ||
89 | */ | ||
90 | lis r4,KERNELBASE@h | ||
91 | ori r4,r4,KERNELBASE@l | ||
92 | rlwinm r6,r25,0,0x3ffffff /* r6 = PHYS_START % 64M */ | ||
93 | rlwinm r5,r4,0,0x3ffffff /* r5 = KERNELBASE % 64M */ | ||
94 | subf r3,r5,r6 /* r3 = r6 - r5 */ | ||
95 | add r3,r4,r3 /* Required Virtual Address */ | ||
96 | |||
97 | bl relocate | ||
98 | #endif | ||
99 | |||
76 | /* We try to not make any assumptions about how the boot loader | 100 | /* We try to not make any assumptions about how the boot loader |
77 | * setup or used the TLBs. We invalidate all mappings from the | 101 | * setup or used the TLBs. We invalidate all mappings from the |
78 | * boot loader and load a single entry in TLB1[0] to map the | 102 | * boot loader and load a single entry in TLB1[0] to map the |
@@ -182,6 +206,16 @@ _ENTRY(__early_start) | |||
182 | 206 | ||
183 | bl early_init | 207 | bl early_init |
184 | 208 | ||
209 | #ifdef CONFIG_RELOCATABLE | ||
210 | #ifdef CONFIG_PHYS_64BIT | ||
211 | mr r3,r23 | ||
212 | mr r4,r25 | ||
213 | #else | ||
214 | mr r3,r25 | ||
215 | #endif | ||
216 | bl relocate_init | ||
217 | #endif | ||
218 | |||
185 | #ifdef CONFIG_DYNAMIC_MEMSTART | 219 | #ifdef CONFIG_DYNAMIC_MEMSTART |
186 | lis r3,kernstart_addr@ha | 220 | lis r3,kernstart_addr@ha |
187 | la r3,kernstart_addr@l(r3) | 221 | la r3,kernstart_addr@l(r3) |
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c index 07ba45b0f07c..ce4a1163ddd3 100644 --- a/arch/powerpc/mm/fsl_booke_mmu.c +++ b/arch/powerpc/mm/fsl_booke_mmu.c | |||
@@ -241,4 +241,32 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base, | |||
241 | /* 64M mapped initially according to head_fsl_booke.S */ | 241 | /* 64M mapped initially according to head_fsl_booke.S */ |
242 | memblock_set_current_limit(min_t(u64, limit, 0x04000000)); | 242 | memblock_set_current_limit(min_t(u64, limit, 0x04000000)); |
243 | } | 243 | } |
244 | |||
245 | #ifdef CONFIG_RELOCATABLE | ||
246 | notrace void __init relocate_init(phys_addr_t start) | ||
247 | { | ||
248 | unsigned long base = KERNELBASE; | ||
249 | |||
250 | /* | ||
251 | * Relocatable kernel support based on processing of dynamic | ||
252 | * relocation entries. | ||
253 | * Compute the virt_phys_offset : | ||
254 | * virt_phys_offset = stext.run - kernstart_addr | ||
255 | * | ||
256 | * stext.run = (KERNELBASE & ~0x3ffffff) + (kernstart_addr & 0x3ffffff) | ||
257 | * When we relocate, we have : | ||
258 | * | ||
259 | * (kernstart_addr & 0x3ffffff) = (stext.run & 0x3ffffff) | ||
260 | * | ||
261 | * hence: | ||
262 | * virt_phys_offset = (KERNELBASE & ~0x3ffffff) - | ||
263 | * (kernstart_addr & ~0x3ffffff) | ||
264 | * | ||
265 | */ | ||
266 | kernstart_addr = start; | ||
267 | start &= ~0x3ffffff; | ||
268 | base &= ~0x3ffffff; | ||
269 | virt_phys_offset = base - start; | ||
270 | } | ||
271 | #endif | ||
244 | #endif | 272 | #endif |