diff options
Diffstat (limited to 'arch/powerpc/kernel/head_fsl_booke.S')
-rw-r--r-- | arch/powerpc/kernel/head_fsl_booke.S | 74 |
1 file changed, 69 insertions, 5 deletions
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S index 75f0223e6d0d..b1f7edc3c360 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ b/arch/powerpc/kernel/head_fsl_booke.S | |||
@@ -81,6 +81,39 @@ _ENTRY(_start); | |||
81 | mr r23,r3 | 81 | mr r23,r3 |
82 | mr r25,r4 | 82 | mr r25,r4 |
83 | 83 | ||
84 | bl 0f | ||
85 | 0: mflr r8 | ||
86 | addis r3,r8,(is_second_reloc - 0b)@ha | ||
87 | lwz r19,(is_second_reloc - 0b)@l(r3) | ||
88 | |||
89 | /* Check if this is the second relocation. */ | ||
90 | cmpwi r19,1 | ||
91 | bne 1f | ||
92 | |||
93 | /* | ||
94 | * For the second relocation, we already get the real memstart_addr | ||
95 | * from device tree. So we will map PAGE_OFFSET to memstart_addr, | ||
96 | * then the virtual address of start kernel should be: | ||
97 | * PAGE_OFFSET + (kernstart_addr - memstart_addr) | ||
98 | * Since the offset between kernstart_addr and memstart_addr should | ||
99 | * never be beyond 1G, so we can just use the lower 32bit of them | ||
100 | * for the calculation. | ||
101 | */ | ||
102 | lis r3,PAGE_OFFSET@h | ||
103 | |||
104 | addis r4,r8,(kernstart_addr - 0b)@ha | ||
105 | addi r4,r4,(kernstart_addr - 0b)@l | ||
106 | lwz r5,4(r4) | ||
107 | |||
108 | addis r6,r8,(memstart_addr - 0b)@ha | ||
109 | addi r6,r6,(memstart_addr - 0b)@l | ||
110 | lwz r7,4(r6) | ||
111 | |||
112 | subf r5,r7,r5 | ||
113 | add r3,r3,r5 | ||
114 | b 2f | ||
115 | |||
116 | 1: | ||
84 | /* | 117 | /* |
85 | * We have the runtime (virutal) address of our base. | 118 | * We have the runtime (virutal) address of our base. |
86 | * We calculate our shift of offset from a 64M page. | 119 | * We calculate our shift of offset from a 64M page. |
@@ -94,7 +127,14 @@ _ENTRY(_start); | |||
94 | subf r3,r5,r6 /* r3 = r6 - r5 */ | 127 | subf r3,r5,r6 /* r3 = r6 - r5 */ |
95 | add r3,r4,r3 /* Required Virtual Address */ | 128 | add r3,r4,r3 /* Required Virtual Address */ |
96 | 129 | ||
97 | bl relocate | 130 | 2: bl relocate |
131 | |||
132 | /* | ||
133 | * For the second relocation, we already set the right tlb entries | ||
134 | * for the kernel space, so skip the code in fsl_booke_entry_mapping.S | ||
135 | */ | ||
136 | cmpwi r19,1 | ||
137 | beq set_ivor | ||
98 | #endif | 138 | #endif |
99 | 139 | ||
100 | /* We try to not make any assumptions about how the boot loader | 140 | /* We try to not make any assumptions about how the boot loader |
@@ -122,6 +162,7 @@ _ENTRY(__early_start) | |||
122 | #include "fsl_booke_entry_mapping.S" | 162 | #include "fsl_booke_entry_mapping.S" |
123 | #undef ENTRY_MAPPING_BOOT_SETUP | 163 | #undef ENTRY_MAPPING_BOOT_SETUP |
124 | 164 | ||
165 | set_ivor: | ||
125 | /* Establish the interrupt vector offsets */ | 166 | /* Establish the interrupt vector offsets */ |
126 | SET_IVOR(0, CriticalInput); | 167 | SET_IVOR(0, CriticalInput); |
127 | SET_IVOR(1, MachineCheck); | 168 | SET_IVOR(1, MachineCheck); |
@@ -207,11 +248,13 @@ _ENTRY(__early_start) | |||
207 | bl early_init | 248 | bl early_init |
208 | 249 | ||
209 | #ifdef CONFIG_RELOCATABLE | 250 | #ifdef CONFIG_RELOCATABLE |
251 | mr r3,r30 | ||
252 | mr r4,r31 | ||
210 | #ifdef CONFIG_PHYS_64BIT | 253 | #ifdef CONFIG_PHYS_64BIT |
211 | mr r3,r23 | 254 | mr r5,r23 |
212 | mr r4,r25 | 255 | mr r6,r25 |
213 | #else | 256 | #else |
214 | mr r3,r25 | 257 | mr r5,r25 |
215 | #endif | 258 | #endif |
216 | bl relocate_init | 259 | bl relocate_init |
217 | #endif | 260 | #endif |
@@ -1207,6 +1250,9 @@ _GLOBAL(switch_to_as1) | |||
1207 | /* | 1250 | /* |
1208 | * Restore to the address space 0 and also invalidate the tlb entry created | 1251 | * Restore to the address space 0 and also invalidate the tlb entry created |
1209 | * by switch_to_as1. | 1252 | * by switch_to_as1. |
1253 | * r3 - the tlb entry which should be invalidated | ||
1254 | * r4 - __pa(PAGE_OFFSET in AS1) - __pa(PAGE_OFFSET in AS0) | ||
1255 | * r5 - device tree virtual address. If r4 is 0, r5 is ignored. | ||
1210 | */ | 1256 | */ |
1211 | _GLOBAL(restore_to_as0) | 1257 | _GLOBAL(restore_to_as0) |
1212 | mflr r0 | 1258 | mflr r0 |
@@ -1215,7 +1261,15 @@ _GLOBAL(restore_to_as0) | |||
1215 | 0: mflr r9 | 1261 | 0: mflr r9 |
1216 | addi r9,r9,1f - 0b | 1262 | addi r9,r9,1f - 0b |
1217 | 1263 | ||
1218 | mfmsr r7 | 1264 | /* |
1265 | * We may map the PAGE_OFFSET in AS0 to a different physical address, | ||
1266 | * so we need calculate the right jump and device tree address based | ||
1267 | * on the offset passed by r4. | ||
1268 | */ | ||
1269 | add r9,r9,r4 | ||
1270 | add r5,r5,r4 | ||
1271 | |||
1272 | 2: mfmsr r7 | ||
1219 | li r8,(MSR_IS | MSR_DS) | 1273 | li r8,(MSR_IS | MSR_DS) |
1220 | andc r7,r7,r8 | 1274 | andc r7,r7,r8 |
1221 | 1275 | ||
@@ -1234,9 +1288,19 @@ _GLOBAL(restore_to_as0) | |||
1234 | mtspr SPRN_MAS1,r9 | 1288 | mtspr SPRN_MAS1,r9 |
1235 | tlbwe | 1289 | tlbwe |
1236 | isync | 1290 | isync |
1291 | |||
1292 | cmpwi r4,0 | ||
1293 | bne 3f | ||
1237 | mtlr r0 | 1294 | mtlr r0 |
1238 | blr | 1295 | blr |
1239 | 1296 | ||
1297 | /* | ||
1298 | * The PAGE_OFFSET will map to a different physical address, | ||
1299 | * jump to _start to do another relocation again. | ||
1300 | */ | ||
1301 | 3: mr r3,r5 | ||
1302 | bl _start | ||
1303 | |||
1240 | /* | 1304 | /* |
1241 | * We put a few things here that have to be page-aligned. This stuff | 1305 | * We put a few things here that have to be page-aligned. This stuff |
1242 | * goes at the beginning of the data segment, which is page-aligned. | 1306 | * goes at the beginning of the data segment, which is page-aligned. |