author     Kevin Hao <haokexin@gmail.com>          2013-12-24 02:12:07 -0500
committer  Scott Wood <scottwood@freescale.com>    2014-01-09 18:52:16 -0500
commit     78a235efdc42ff363de81fdbc171385e8b86b69b (patch)
tree       5e23fc68849e1ad9f86b10b019a6c844c75e2b54 /arch
parent     dd189692d40948d6445bbaeb8cb9bf9d15f54dc6 (diff)
powerpc/fsl_booke: set the tlb entry for the kernel address in AS1
We use tlb1 entries to map lowmem into the kernel address space. The
current code assumes that the first tlb entry covers the kernel image,
but this does not hold in some special cases, such as when we run a
relocatable kernel above 64M or set CONFIG_KERNEL_START above 64M. So
we switch to address space 1 before setting these tlb entries.
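
The resulting call pattern in adjust_total_lowmem() (see the
fsl_booke_mmu.c hunk below) is, in outline:

	i = switch_to_as1();
	__max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM);
	restore_to_as0(i);

switch_to_as1() returns the ESEL of the temporary AS1 entry so that
restore_to_as0() can invalidate it once the final AS0 mapping is in
place.
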
Signed-off-by: Kevin Hao <haokexin@gmail.com>
Signed-off-by: Scott Wood <scottwood@freescale.com>
Diffstat (limited to 'arch')
 arch/powerpc/kernel/head_fsl_booke.S | 81 +++++++++++++++++++++++++++++++++++
 arch/powerpc/mm/fsl_booke_mmu.c      |  2 ++
 arch/powerpc/mm/mmu_decl.h           |  2 ++
3 files changed, 85 insertions, 0 deletions
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 19bd574bda9d..75f0223e6d0d 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -1157,6 +1157,87 @@ __secondary_hold_acknowledge:
 #endif

 /*
+ * Create a tlb entry with the same effective and physical address as
+ * the tlb entry used by the current running code. But set the TS to 1.
+ * Then switch to the address space 1. It will return with the r3 set to
+ * the ESEL of the new created tlb.
+ */
+_GLOBAL(switch_to_as1)
+	mflr	r5
+
+	/* Find a entry not used */
+	mfspr	r3,SPRN_TLB1CFG
+	andi.	r3,r3,0xfff
+	mfspr	r4,SPRN_PID
+	rlwinm	r4,r4,16,0x3fff0000	/* turn PID into MAS6[SPID] */
+	mtspr	SPRN_MAS6,r4
+1:	lis	r4,0x1000		/* Set MAS0(TLBSEL) = 1 */
+	addi	r3,r3,-1
+	rlwimi	r4,r3,16,4,15		/* Setup MAS0 = TLBSEL | ESEL(r3) */
+	mtspr	SPRN_MAS0,r4
+	tlbre
+	mfspr	r4,SPRN_MAS1
+	andis.	r4,r4,MAS1_VALID@h
+	bne	1b
+
+	/* Get the tlb entry used by the current running code */
+	bl	0f
+0:	mflr	r4
+	tlbsx	0,r4
+
+	mfspr	r4,SPRN_MAS1
+	ori	r4,r4,MAS1_TS		/* Set the TS = 1 */
+	mtspr	SPRN_MAS1,r4
+
+	mfspr	r4,SPRN_MAS0
+	rlwinm	r4,r4,0,~MAS0_ESEL_MASK
+	rlwimi	r4,r3,16,4,15		/* Setup MAS0 = TLBSEL | ESEL(r3) */
+	mtspr	SPRN_MAS0,r4
+	tlbwe
+	isync
+	sync
+
+	mfmsr	r4
+	ori	r4,r4,MSR_IS | MSR_DS
+	mtspr	SPRN_SRR0,r5
+	mtspr	SPRN_SRR1,r4
+	sync
+	rfi
+
+/*
+ * Restore to the address space 0 and also invalidate the tlb entry created
+ * by switch_to_as1.
+ */
+_GLOBAL(restore_to_as0)
+	mflr	r0
+
+	bl	0f
+0:	mflr	r9
+	addi	r9,r9,1f - 0b
+
+	mfmsr	r7
+	li	r8,(MSR_IS | MSR_DS)
+	andc	r7,r7,r8
+
+	mtspr	SPRN_SRR0,r9
+	mtspr	SPRN_SRR1,r7
+	sync
+	rfi
+
+	/* Invalidate the temporary tlb entry for AS1 */
+1:	lis	r9,0x1000		/* Set MAS0(TLBSEL) = 1 */
+	rlwimi	r9,r3,16,4,15		/* Setup MAS0 = TLBSEL | ESEL(r3) */
+	mtspr	SPRN_MAS0,r9
+	tlbre
+	mfspr	r9,SPRN_MAS1
+	rlwinm	r9,r9,0,2,31		/* Clear MAS1 Valid and IPPROT */
+	mtspr	SPRN_MAS1,r9
+	tlbwe
+	isync
+	mtlr	r0
+	blr
+
+/*
  * We put a few things here that have to be page-aligned. This stuff
  * goes at the beginning of the data segment, which is page-aligned.
  */
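
Note on the MAS0 encoding used throughout the new code: the
"lis rX,0x1000" / "rlwimi rX,r3,16,4,15" pairs simply build
MAS0 = TLBSEL(1) | ESEL(r3). A rough C equivalent, given here only as
an illustration (the helper name is invented, and it assumes the e500
layout with TLBSEL in the 0x30000000 field and ESEL in the 0x0fff0000
field):

	/* Illustrative only: compose MAS0 selecting TLB1 and entry 'esel'. */
	static inline unsigned int mas0_tlb1(unsigned int esel)
	{
		return 0x10000000 | ((esel << 16) & 0x0fff0000);
	}
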
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index ce4a1163ddd3..1d54f6d35e71 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -222,7 +222,9 @@ void __init adjust_total_lowmem(void)
 	/* adjust lowmem size to __max_low_memory */
 	ram = min((phys_addr_t)__max_low_memory, (phys_addr_t)total_lowmem);

+	i = switch_to_as1();
 	__max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM);
+	restore_to_as0(i);

 	pr_info("Memory CAM mapping: ");
 	for (i = 0; i < tlbcam_index - 1; i++)
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 83eb5d5f53d5..eefbf7bb4331 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -148,6 +148,8 @@ extern unsigned long calc_cam_sz(unsigned long ram, unsigned long virt,
 extern void MMU_init_hw(void);
 extern unsigned long mmu_mapin_ram(unsigned long top);
 extern void adjust_total_lowmem(void);
+extern int switch_to_as1(void);
+extern void restore_to_as0(int esel);
 #endif
 extern void loadcam_entry(unsigned int index);
