author		Kevin Hao <haokexin@gmail.com>	2013-12-24 02:12:10 -0500
committer	Scott Wood <scottwood@freescale.com>	2014-01-09 18:52:17 -0500
commit		7d2471f9fa85089beb1cb9436ffc28f9e11e518d (patch)
tree		1d3c786feb97e2ef82a417a9a95dcecf82bcd5da /arch/powerpc
parent		813125d83372e19edecaba811d4d0dc115d36819 (diff)
powerpc/fsl_booke: make sure PAGE_OFFSET map to memstart_addr for relocatable kernel
This is always true for a non-relocatable kernel; otherwise the kernel would get stuck. For a relocatable kernel it is a little more complicated: when booting, we simply align the kernel start address down to a 64M boundary and map PAGE_OFFSET from there, and the relocation is based on that virtual address. But if that address is not the same as memstart_addr, we have to change the mapping of PAGE_OFFSET to the real memstart_addr and perform the relocation again.

Signed-off-by: Kevin Hao <haokexin@gmail.com>
[scottwood@freescale.com: make offset long and non-negative in simple case]
Signed-off-by: Scott Wood <scottwood@freescale.com>
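The address math behind the second relocation is easy to check in isolation. The following standalone program is an illustrative sketch only, not code from the patch: PAGE_OFFSET and the two physical addresses are assumed example values. It mirrors what the new assembly in head_fsl_booke.S (first hunk below) computes: the runtime virtual start of the kernel is PAGE_OFFSET + (kernstart_addr - memstart_addr), and since that offset is never beyond 1G, the low 32 bits of each 64-bit physical address suffice.

/*
 * Illustrative sketch, not kernel code: second-relocation address math.
 * PAGE_OFFSET and both physical addresses are assumed example values.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_OFFSET	0xc0000000u	/* assumed 32-bit powerpc value */

int main(void)
{
	uint64_t memstart_addr  = 0x0000000010000000ull; /* assumed RAM start   */
	uint64_t kernstart_addr = 0x0000000014000000ull; /* assumed kernel load */

	/*
	 * The offset is below 1G, so the low 32 bits of each address are
	 * enough -- this is why the assembly loads only the low word
	 * (lwz r5,4(r4) / lwz r7,4(r6)) of each big-endian 64-bit variable.
	 */
	uint32_t offset = (uint32_t)kernstart_addr - (uint32_t)memstart_addr;

	printf("virtual start of kernel: 0x%08x\n", PAGE_OFFSET + offset);
	return 0;	/* prints 0xc4000000 */
}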
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/kernel/head_fsl_booke.S	74
-rw-r--r--	arch/powerpc/mm/fsl_booke_mmu.c		42
-rw-r--r--	arch/powerpc/mm/mmu_decl.h		2
3 files changed, 106 insertions(+), 12 deletions(-)
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 75f0223e6d0d..b1f7edc3c360 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -81,6 +81,39 @@ _ENTRY(_start);
 	mr	r23,r3
 	mr	r25,r4
 
+	bl	0f
+0:	mflr	r8
+	addis	r3,r8,(is_second_reloc - 0b)@ha
+	lwz	r19,(is_second_reloc - 0b)@l(r3)
+
+	/* Check if this is the second relocation. */
+	cmpwi	r19,1
+	bne	1f
+
+	/*
+	 * For the second relocation, we already get the real memstart_addr
+	 * from device tree. So we will map PAGE_OFFSET to memstart_addr,
+	 * then the virtual address of start kernel should be:
+	 *          PAGE_OFFSET + (kernstart_addr - memstart_addr)
+	 * Since the offset between kernstart_addr and memstart_addr should
+	 * never be beyond 1G, so we can just use the lower 32bit of them
+	 * for the calculation.
+	 */
+	lis	r3,PAGE_OFFSET@h
+
+	addis	r4,r8,(kernstart_addr - 0b)@ha
+	addi	r4,r4,(kernstart_addr - 0b)@l
+	lwz	r5,4(r4)
+
+	addis	r6,r8,(memstart_addr - 0b)@ha
+	addi	r6,r6,(memstart_addr - 0b)@l
+	lwz	r7,4(r6)
+
+	subf	r5,r7,r5
+	add	r3,r3,r5
+	b	2f
+
+1:
 	/*
 	 * We have the runtime (virutal) address of our base.
 	 * We calculate our shift of offset from a 64M page.
@@ -94,7 +127,14 @@ _ENTRY(_start);
 	subf	r3,r5,r6			/* r3 = r6 - r5 */
 	add	r3,r4,r3	/* Required Virtual Address */
 
-	bl	relocate
+2:	bl	relocate
+
+	/*
+	 * For the second relocation, we already set the right tlb entries
+	 * for the kernel space, so skip the code in fsl_booke_entry_mapping.S
+	*/
+	cmpwi	r19,1
+	beq	set_ivor
 #endif
 
 /* We try to not make any assumptions about how the boot loader
@@ -122,6 +162,7 @@ _ENTRY(__early_start)
 #include "fsl_booke_entry_mapping.S"
 #undef ENTRY_MAPPING_BOOT_SETUP
 
+set_ivor:
 	/* Establish the interrupt vector offsets */
 	SET_IVOR(0,  CriticalInput);
 	SET_IVOR(1,  MachineCheck);
@@ -207,11 +248,13 @@ _ENTRY(__early_start)
 	bl	early_init
 
 #ifdef CONFIG_RELOCATABLE
+	mr	r3,r30
+	mr	r4,r31
 #ifdef CONFIG_PHYS_64BIT
-	mr	r3,r23
-	mr	r4,r25
+	mr	r5,r23
+	mr	r6,r25
 #else
-	mr	r3,r25
+	mr	r5,r25
 #endif
 	bl	relocate_init
 #endif
@@ -1207,6 +1250,9 @@ _GLOBAL(switch_to_as1)
 /*
  * Restore to the address space 0 and also invalidate the tlb entry created
  * by switch_to_as1.
+ * r3 - the tlb entry which should be invalidated
+ * r4 - __pa(PAGE_OFFSET in AS1) - __pa(PAGE_OFFSET in AS0)
+ * r5 - device tree virtual address. If r4 is 0, r5 is ignored.
 */
 _GLOBAL(restore_to_as0)
 	mflr	r0
@@ -1215,7 +1261,15 @@ _GLOBAL(restore_to_as0)
 0:	mflr	r9
 	addi	r9,r9,1f - 0b
 
-	mfmsr	r7
+	/*
+	 * We may map the PAGE_OFFSET in AS0 to a different physical address,
+	 * so we need calculate the right jump and device tree address based
+	 * on the offset passed by r4.
+	 */
+	add	r9,r9,r4
+	add	r5,r5,r4
+
+2:	mfmsr	r7
 	li	r8,(MSR_IS | MSR_DS)
 	andc	r7,r7,r8
 
@@ -1234,9 +1288,19 @@ _GLOBAL(restore_to_as0)
 	mtspr	SPRN_MAS1,r9
 	tlbwe
 	isync
+
+	cmpwi	r4,0
+	bne	3f
 	mtlr	r0
 	blr
 
+	/*
+	 * The PAGE_OFFSET will map to a different physical address,
+	 * jump to _start to do another relocation again.
+	*/
+3:	mr	r3,r5
+	bl	_start
+
 /*
  * We put a few things here that have to be page-aligned. This stuff
  * goes at the beginning of the data segment, which is page-aligned.
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index ca956c83e3a2..95deb9fdf92f 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -231,7 +231,7 @@ void __init adjust_total_lowmem(void)
 
 	i = switch_to_as1();
 	__max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM);
-	restore_to_as0(i);
+	restore_to_as0(i, 0, 0);
 
 	pr_info("Memory CAM mapping: ");
 	for (i = 0; i < tlbcam_index - 1; i++)
@@ -252,17 +252,25 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
 }
 
 #ifdef CONFIG_RELOCATABLE
-notrace void __init relocate_init(phys_addr_t start)
+int __initdata is_second_reloc;
+notrace void __init relocate_init(u64 dt_ptr, phys_addr_t start)
 {
 	unsigned long base = KERNELBASE;
 
+	kernstart_addr = start;
+	if (is_second_reloc) {
+		virt_phys_offset = PAGE_OFFSET - memstart_addr;
+		return;
+	}
+
 	/*
 	 * Relocatable kernel support based on processing of dynamic
-	 * relocation entries.
-	 * Compute the virt_phys_offset :
+	 * relocation entries. Before we get the real memstart_addr,
+	 * We will compute the virt_phys_offset like this:
 	 * virt_phys_offset = stext.run - kernstart_addr
 	 *
-	 * stext.run = (KERNELBASE & ~0x3ffffff) + (kernstart_addr & 0x3ffffff)
+	 * stext.run = (KERNELBASE & ~0x3ffffff) +
+	 *				(kernstart_addr & 0x3ffffff)
 	 * When we relocate, we have :
 	 *
 	 * (kernstart_addr & 0x3ffffff) = (stext.run & 0x3ffffff)
@@ -272,10 +280,32 @@ notrace void __init relocate_init(phys_addr_t start)
 	 *	(kernstart_addr & ~0x3ffffff)
 	 *
 	 */
-	kernstart_addr = start;
 	start &= ~0x3ffffff;
 	base &= ~0x3ffffff;
 	virt_phys_offset = base - start;
+	early_get_first_memblock_info(__va(dt_ptr), NULL);
+	/*
+	 * We now get the memstart_addr, then we should check if this
+	 * address is the same as what the PAGE_OFFSET map to now. If
+	 * not we have to change the map of PAGE_OFFSET to memstart_addr
+	 * and do a second relocation.
+	 */
+	if (start != memstart_addr) {
+		int n;
+		long offset = start - memstart_addr;
+
+		is_second_reloc = 1;
+		n = switch_to_as1();
+		/* map a 64M area for the second relocation */
+		if (memstart_addr > start)
+			map_mem_in_cams(0x4000000, CONFIG_LOWMEM_CAM_NUM);
+		else
+			map_mem_in_cams_addr(start, PAGE_OFFSET + offset,
+					0x4000000, CONFIG_LOWMEM_CAM_NUM);
+		restore_to_as0(n, offset, __va(dt_ptr));
+		/* We should never reach here */
+		panic("Relocation error");
+	}
 }
 #endif
 #endif
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index eefbf7bb4331..91da910210cb 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -149,7 +149,7 @@ extern void MMU_init_hw(void);
 extern unsigned long mmu_mapin_ram(unsigned long top);
 extern void adjust_total_lowmem(void);
 extern int switch_to_as1(void);
-extern void restore_to_as0(int esel);
+extern void restore_to_as0(int esel, int offset, void *dt_ptr);
 #endif
 extern void loadcam_entry(unsigned int index);
 
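For the first pass, the virt_phys_offset computation described in the relocate_init() comment above can be checked the same way. Again an illustrative sketch, not code from the patch; KERNELBASE and the load address are assumed example values.

/*
 * Illustrative sketch, not kernel code: first-pass virt_phys_offset.
 * KERNELBASE and the load address are assumed example values.
 */
#include <stdio.h>
#include <stdint.h>

#define KERNELBASE	0xc0000000u	/* assumed 32-bit powerpc value */

int main(void)
{
	uint32_t start = 0x15000000;	/* assumed unaligned kernel load addr */
	uint32_t base  = KERNELBASE;

	start &= ~0x3ffffff;		/* align down to 64M: 0x14000000 */
	base  &= ~0x3ffffff;

	/* virt_phys_offset = stext.run - kernstart_addr, per the comment */
	printf("virt_phys_offset = 0x%08x\n", base - start);
	return 0;	/* prints 0xac000000 */
}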