diff options
| author | Jonas Bonn <jonas@southpole.se> | 2011-06-04 04:05:39 -0400 |
|---|---|---|
| committer | Jonas Bonn <jonas@southpole.se> | 2011-07-22 12:46:27 -0400 |
| commit | 9d02a4283e9ce4e9ca11ff00615bdacdb0515a1a (patch) | |
| tree | 9b4d890053cfd0414b5f560ae233d086c5e365cd /arch | |
| parent | 82ed223c264def2b15ee4bec2e8c3048092ceb5f (diff) | |
OpenRISC: Boot code
Architecture code and early setup routines for booting Linux.
Signed-off-by: Jonas Bonn <jonas@southpole.se>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch')
| -rw-r--r-- | arch/openrisc/kernel/entry.S | 1128 | ||||
| -rw-r--r-- | arch/openrisc/kernel/head.S | 1607 | ||||
| -rw-r--r-- | arch/openrisc/kernel/init_task.c | 41 | ||||
| -rw-r--r-- | arch/openrisc/kernel/setup.c | 381 |
4 files changed, 3157 insertions, 0 deletions
diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S new file mode 100644 index 000000000000..d5f9c35a583f --- /dev/null +++ b/arch/openrisc/kernel/entry.S | |||
| @@ -0,0 +1,1128 @@ | |||
| 1 | /* | ||
| 2 | * OpenRISC entry.S | ||
| 3 | * | ||
| 4 | * Linux architectural port borrowing liberally from similar works of | ||
| 5 | * others. All original copyrights apply as per the original source | ||
| 6 | * declaration. | ||
| 7 | * | ||
| 8 | * Modifications for the OpenRISC architecture: | ||
| 9 | * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> | ||
| 10 | * Copyright (C) 2005 Gyorgy Jeney <nog@bsemi.com> | ||
| 11 | * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> | ||
| 12 | * | ||
| 13 | * This program is free software; you can redistribute it and/or | ||
| 14 | * modify it under the terms of the GNU General Public License | ||
| 15 | * as published by the Free Software Foundation; either version | ||
| 16 | * 2 of the License, or (at your option) any later version. | ||
| 17 | */ | ||
| 18 | |||
| 19 | #include <linux/linkage.h> | ||
| 20 | |||
| 21 | #include <asm/processor.h> | ||
| 22 | #include <asm/unistd.h> | ||
| 23 | #include <asm/thread_info.h> | ||
| 24 | #include <asm/errno.h> | ||
| 25 | #include <asm/spr_defs.h> | ||
| 26 | #include <asm/page.h> | ||
| 27 | #include <asm/mmu.h> | ||
| 28 | #include <asm/pgtable.h> | ||
| 29 | #include <asm/asm-offsets.h> | ||
| 30 | |||
/*
 * DISABLE_INTERRUPTS(t1,t2): mask external-interrupt (IEE) and
 * tick-timer (TEE) exceptions by clearing those bits in the
 * supervision register.  t1 and t2 are caller-supplied scratch
 * registers and are clobbered.
 */
#define DISABLE_INTERRUPTS(t1,t2)			\
	l.mfspr	t2,r0,SPR_SR				;\
	l.movhi	t1,hi(~(SPR_SR_IEE|SPR_SR_TEE))		;\
	l.ori	t1,t1,lo(~(SPR_SR_IEE|SPR_SR_TEE))	;\
	l.and	t2,t2,t1				;\
	l.mtspr	r0,t2,SPR_SR
| 37 | |||
/*
 * ENABLE_INTERRUPTS(t1): set the IEE and TEE bits in SPR_SR,
 * re-enabling external interrupts and the tick timer.  t1 is a
 * clobbered scratch register.  Only lo() is OR-ed in here; this
 * assumes both bits sit in the low 16 bits of SR -- confirm
 * against spr_defs.h.
 */
#define ENABLE_INTERRUPTS(t1)			\
	l.mfspr	t1,r0,SPR_SR			;\
	l.ori	t1,t1,lo(SPR_SR_IEE|SPR_SR_TEE)	;\
	l.mtspr	r0,t1,SPR_SR
| 42 | |||
/* =========================================================[ macros ]=== */

/*
 * We need to disable interrupts at the beginning of RESTORE_ALL
 * since an interrupt might come in after we've loaded the EPC return
 * address and overwrite EPC with an address somewhere in RESTORE_ALL,
 * which is of course wrong!
 */

/*
 * RESTORE_ALL: full exception-return path.  Reloads EPCR/ESR from the
 * saved PT_PC/PT_SR slots in the pt_regs frame at r1, restores r2-r31,
 * restores r1 itself last (from PT_SP, since r1 is the frame pointer
 * into pt_regs until then), and returns with l.rfe.
 */
#define RESTORE_ALL \
	DISABLE_INTERRUPTS(r3,r4)	;\
	l.lwz	r3,PT_PC(r1)		;\
	l.mtspr	r0,r3,SPR_EPCR_BASE	;\
	l.lwz	r3,PT_SR(r1)		;\
	l.mtspr	r0,r3,SPR_ESR_BASE	;\
	l.lwz	r2,PT_GPR2(r1)		;\
	l.lwz	r3,PT_GPR3(r1)		;\
	l.lwz	r4,PT_GPR4(r1)		;\
	l.lwz	r5,PT_GPR5(r1)		;\
	l.lwz	r6,PT_GPR6(r1)		;\
	l.lwz	r7,PT_GPR7(r1)		;\
	l.lwz	r8,PT_GPR8(r1)		;\
	l.lwz	r9,PT_GPR9(r1)		;\
	l.lwz	r10,PT_GPR10(r1)	;\
	l.lwz	r11,PT_GPR11(r1)	;\
	l.lwz	r12,PT_GPR12(r1)	;\
	l.lwz	r13,PT_GPR13(r1)	;\
	l.lwz	r14,PT_GPR14(r1)	;\
	l.lwz	r15,PT_GPR15(r1)	;\
	l.lwz	r16,PT_GPR16(r1)	;\
	l.lwz	r17,PT_GPR17(r1)	;\
	l.lwz	r18,PT_GPR18(r1)	;\
	l.lwz	r19,PT_GPR19(r1)	;\
	l.lwz	r20,PT_GPR20(r1)	;\
	l.lwz	r21,PT_GPR21(r1)	;\
	l.lwz	r22,PT_GPR22(r1)	;\
	l.lwz	r23,PT_GPR23(r1)	;\
	l.lwz	r24,PT_GPR24(r1)	;\
	l.lwz	r25,PT_GPR25(r1)	;\
	l.lwz	r26,PT_GPR26(r1)	;\
	l.lwz	r27,PT_GPR27(r1)	;\
	l.lwz	r28,PT_GPR28(r1)	;\
	l.lwz	r29,PT_GPR29(r1)	;\
	l.lwz	r30,PT_GPR30(r1)	;\
	l.lwz	r31,PT_GPR31(r1)	;\
	l.lwz	r1,PT_SP(r1)		;\
	l.rfe
| 90 | |||
| 91 | |||
/*
 * EXCEPTION_ENTRY(handler): common prologue for real exception handlers.
 * Completes the pt_regs save started by the low-level vector code
 * (presumably EXCEPTION_HANDLE in head.S -- see the NOTE below), which
 * has already saved r1, r4, r10, r12, r30, EPCR and ESR.  r11 is stored
 * to both PT_ORIG_GPR11 and PT_GPR11, and PT_SYSCALLNO is cleared (r0)
 * to mark this frame as a non-syscall entry.
 */
#define EXCEPTION_ENTRY(handler) \
	.global handler ;\
handler: ;\
	/* r1, EPCR, ESR are already saved */ ;\
	l.sw	PT_GPR2(r1),r2 ;\
	l.sw	PT_GPR3(r1),r3 ;\
	l.sw	PT_ORIG_GPR11(r1),r11 ;\
	/* r4 already saved */ ;\
	l.sw	PT_GPR5(r1),r5 ;\
	l.sw	PT_GPR6(r1),r6 ;\
	l.sw	PT_GPR7(r1),r7 ;\
	l.sw	PT_GPR8(r1),r8 ;\
	l.sw	PT_GPR9(r1),r9 ;\
	/* r10 already saved */ ;\
	l.sw	PT_GPR11(r1),r11 ;\
	/* r12 already saved */ ;\
	l.sw	PT_GPR13(r1),r13 ;\
	l.sw	PT_GPR14(r1),r14 ;\
	l.sw	PT_GPR15(r1),r15 ;\
	l.sw	PT_GPR16(r1),r16 ;\
	l.sw	PT_GPR17(r1),r17 ;\
	l.sw	PT_GPR18(r1),r18 ;\
	l.sw	PT_GPR19(r1),r19 ;\
	l.sw	PT_GPR20(r1),r20 ;\
	l.sw	PT_GPR21(r1),r21 ;\
	l.sw	PT_GPR22(r1),r22 ;\
	l.sw	PT_GPR23(r1),r23 ;\
	l.sw	PT_GPR24(r1),r24 ;\
	l.sw	PT_GPR25(r1),r25 ;\
	l.sw	PT_GPR26(r1),r26 ;\
	l.sw	PT_GPR27(r1),r27 ;\
	l.sw	PT_GPR28(r1),r28 ;\
	l.sw	PT_GPR29(r1),r29 ;\
	/* r30 already saved */ ;\
	/* l.sw PT_GPR30(r1),r30*/ ;\
	l.sw	PT_GPR31(r1),r31 ;\
	l.sw	PT_SYSCALLNO(r1),r0
| 129 | |||
/*
 * UNHANDLED_EXCEPTION(handler,vector): entry for vectors with no real
 * handler.  Completes the register save like EXCEPTION_ENTRY, then
 * calls unhandled_exception(pt_regs (r3), ea (r4), vector (r5)) and
 * returns via _ret_from_exception.  Note the mirror-image quirk vs.
 * EXCEPTION_ENTRY: here r31 was saved by the vector code and r30 is
 * saved explicitly.
 */
#define UNHANDLED_EXCEPTION(handler,vector) \
	.global handler ;\
handler: ;\
	/* r1, EPCR, ESR already saved */ ;\
	l.sw	PT_GPR2(r1),r2 ;\
	l.sw	PT_GPR3(r1),r3 ;\
	l.sw	PT_ORIG_GPR11(r1),r11 ;\
	l.sw	PT_GPR5(r1),r5 ;\
	l.sw	PT_GPR6(r1),r6 ;\
	l.sw	PT_GPR7(r1),r7 ;\
	l.sw	PT_GPR8(r1),r8 ;\
	l.sw	PT_GPR9(r1),r9 ;\
	/* r10 already saved */ ;\
	l.sw	PT_GPR11(r1),r11 ;\
	/* r12 already saved */ ;\
	l.sw	PT_GPR13(r1),r13 ;\
	l.sw	PT_GPR14(r1),r14 ;\
	l.sw	PT_GPR15(r1),r15 ;\
	l.sw	PT_GPR16(r1),r16 ;\
	l.sw	PT_GPR17(r1),r17 ;\
	l.sw	PT_GPR18(r1),r18 ;\
	l.sw	PT_GPR19(r1),r19 ;\
	l.sw	PT_GPR20(r1),r20 ;\
	l.sw	PT_GPR21(r1),r21 ;\
	l.sw	PT_GPR22(r1),r22 ;\
	l.sw	PT_GPR23(r1),r23 ;\
	l.sw	PT_GPR24(r1),r24 ;\
	l.sw	PT_GPR25(r1),r25 ;\
	l.sw	PT_GPR26(r1),r26 ;\
	l.sw	PT_GPR27(r1),r27 ;\
	l.sw	PT_GPR28(r1),r28 ;\
	l.sw	PT_GPR29(r1),r29 ;\
	/* r31 already saved */ ;\
	l.sw	PT_GPR30(r1),r30 ;\
	/* l.sw PT_GPR31(r1),r31 */ ;\
	l.sw	PT_SYSCALLNO(r1),r0 ;\
	l.addi	r3,r1,0 ;\
	/* r4 is exception EA */ ;\
	l.addi	r5,r0,vector ;\
	l.jal	unhandled_exception ;\
	l.nop ;\
	l.j	_ret_from_exception ;\
	l.nop
| 173 | |||
/*
 * NOTE: one should never assume that SPR_EPC, SPR_ESR, SPR_EEAR
 * contain the same values as when the exception we're handling
 * occurred.  In fact they never do.  If you need them, use the
 * values saved on the stack (for SPR_EPC, SPR_ESR) or the content
 * of r4 (for SPR_EEAR).  For details look at EXCEPTION_HANDLE()
 * in 'arch/or32/kernel/head.S'
 */
| 182 | |||
/* =====================================================[ exceptions] === */

/* ---[ 0x100: RESET exception ]----------------------------------------- */

/* Reset vector: jump to the kernel entry point (_start, in head.S).
 * The delay-slot l.andi r0,r0,0 is effectively a no-op. */
EXCEPTION_ENTRY(_tng_kernel_start)
	l.jal	_start
	l.andi	r0,r0,0		/* delay slot: nop */
| 190 | |||
/* ---[ 0x200: BUS exception ]------------------------------------------- */

/* Bus error: call do_bus_fault with pt_regs in r3 (set in the delay
 * slot); the fault EA is already in r4. */
EXCEPTION_ENTRY(_bus_fault_handler)
	/* r4: EA of fault (set by EXCEPTION_HANDLE) */
	l.jal	do_bus_fault
	l.addi	r3,r1,0		/* delay slot: pt_regs */

	l.j	_ret_from_exception
	l.nop
| 200 | |||
/* ---[ 0x300: Data Page Fault exception ]------------------------------- */

/* Data page fault: determine the actually-faulting instruction (it is
 * at PC+4 if the fault hit in a jump/branch delay slot), classify it as
 * read or write by opcode, then call
 * do_page_fault(pt_regs (r3), ea (r4), vector (r5), write (r6)).
 */
EXCEPTION_ENTRY(_data_page_fault_handler)
	/* set up parameters for do_page_fault */
	l.addi	r3,r1,0			// pt_regs
	/* r4 set by EXCEPTION_HANDLE */ // effective address of fault
	l.ori	r5,r0,0x300		// exception vector

	/*
	 * __PHX__: TODO
	 *
	 * all this can be written much simpler. look at
	 * DTLB miss handler in the CONFIG_GUARD_PROTECTED_CORE part
	 */
#ifdef CONFIG_OPENRISC_NO_SPR_SR_DSX
	/* No hardware DSX (delay-slot exception) bit: decide by decoding.
	 * If the insn at the faulting PC is a jump/branch, the fault
	 * occurred in its delay slot and the real memory insn is at PC+4. */
	l.lwz	r6,PT_PC(r3)		// address of an offending insn
	l.lwz	r6,0(r6)		// instruction that caused pf

	l.srli	r6,r6,26		// check opcode for jump insn
	l.sfeqi	r6,0			// l.j
	l.bf	8f
	l.sfeqi	r6,1			// l.jal
	l.bf	8f
	l.sfeqi	r6,3			// l.bnf
	l.bf	8f
	l.sfeqi	r6,4			// l.bf
	l.bf	8f
	l.sfeqi	r6,0x11			// l.jr
	l.bf	8f
	l.sfeqi	r6,0x12			// l.jalr
	l.bf	8f

	l.nop

	l.j	9f
	l.nop
8:	/* fault was in a delay slot: decode the insn at PC+4 instead */

	l.lwz	r6,PT_PC(r3)		// address of an offending insn
	l.addi	r6,r6,4
	l.lwz	r6,0(r6)		// instruction that caused pf
	l.srli	r6,r6,26		// get opcode
9:

#else

	l.mfspr	r6,r0,SPR_SR		// SR
	/* NOTE(review): this reads the *live* SR; the saved ESR (the
	 * commented-out load below) may be the correct source for the
	 * DSX flag at exception time -- verify. */
//	l.lwz	r6,PT_SR(r3)		// ESR
	l.andi	r6,r6,SPR_SR_DSX	// check for delay slot exception
	/* FIX: SPR_SR_DSX is not bit 0, so the old "l.sfeqi r6,0x1"
	 * compare could never be true and the delay-slot case was never
	 * taken.  Test the masked value for non-zero instead. */
	l.sfne	r6,r0			// exception happened in delay slot
	l.bnf	7f
	l.lwz	r6,PT_PC(r3)		// address of an offending insn

	l.addi	r6,r6,4			// offending insn is in delay slot
7:
	l.lwz	r6,0(r6)		// instruction that caused pf
	l.srli	r6,r6,26		// check opcode for write access
#endif

	/* Opcodes 0x34..0x37 are the store instructions => write access */
	l.sfgeui r6,0x34		// check opcode for write access
	l.bnf	1f
	l.sfleui r6,0x37
	l.bnf	1f
	l.ori	r6,r0,0x1		// write access
	l.j	2f
	l.nop
1:	l.ori	r6,r0,0x0		// !write access
2:

	/* call fault.c handler in or32/mm/fault.c */
	l.jal	do_page_fault
	l.nop
	l.j	_ret_from_exception
	l.nop
| 275 | |||
/* ---[ 0x400: Insn Page Fault exception ]------------------------------- */

/* Instruction page fault: always a read access; call
 * do_page_fault(pt_regs (r3), ea (r4), vector (r5), write=0 (r6)). */
EXCEPTION_ENTRY(_insn_page_fault_handler)
	/* set up parameters for do_page_fault */
	l.addi	r3,r1,0			// pt_regs
	/* r4 set by EXCEPTION_HANDLE */ // effective address of fault
	l.ori	r5,r0,0x400		// exception vector
	l.ori	r6,r0,0x0		// !write access

	/* call fault.c handler in or32/mm/fault.c */
	l.jal	do_page_fault
	l.nop
	l.j	_ret_from_exception
	l.nop
| 290 | |||
| 291 | |||
/* ---[ 0x500: Timer exception ]----------------------------------------- */

/* Tick-timer exception: hand off to timer_interrupt(pt_regs) and
 * return via the interrupt (not exception) return path. */
EXCEPTION_ENTRY(_timer_handler)
	l.jal	timer_interrupt
	l.addi	r3,r1,0		/* delay slot: pt_regs */

	l.j	_ret_from_intr
	l.nop
| 300 | |||
/* ---[ 0x600: Alignment exception ]------------------------------------- */

/* Unaligned access: call do_unaligned_access(pt_regs); the fault EA is
 * already in r4.  (An old in-kernel fixup variant is kept #if 0'd
 * further down.) */
EXCEPTION_ENTRY(_alignment_handler)
	/* r4: EA of fault (set by EXCEPTION_HANDLE) */
	l.jal	do_unaligned_access
	l.addi	r3,r1,0		/* delay slot: pt_regs */

	l.j	_ret_from_exception
	l.nop
| 310 | |||
#if 0
/* Dead code: an earlier alignment handler that emulated the unaligned
 * load/store byte-by-byte in the kernel.  Kept disabled; the live
 * handler above defers to do_unaligned_access instead.  (Note the
 * historical typo in the label: "_aligment_".) */
EXCEPTION_ENTRY(_aligment_handler)
//	l.mfspr	r2,r0,SPR_EEAR_BASE	/* Load the effective address */
	l.addi	r2,r4,0
//	l.mfspr	r5,r0,SPR_EPCR_BASE	/* Load the insn address */
	l.lwz	r5,PT_PC(r1)

	l.lwz	r3,0(r5)	/* Load insn */
	l.srli	r4,r3,26	/* Shift right to get the insn opcode */

	/* If the insn at PC is a jump/branch, the faulting load/store is
	 * in its delay slot; compute the return address accordingly. */
	l.sfeqi	r4,0x00		/* Check if the load/store insn is in delay slot */
	l.bf	jmp
	l.sfeqi	r4,0x01
	l.bf	jmp
	l.sfeqi	r4,0x03
	l.bf	jmp
	l.sfeqi	r4,0x04
	l.bf	jmp
	l.sfeqi	r4,0x11
	l.bf	jr
	l.sfeqi	r4,0x12
	l.bf	jr
	l.nop
	l.j	1f
	l.addi	r5,r5,4		/* Increment PC to get return insn address */

jmp:
	l.slli	r4,r3,6		/* Get the signed extended jump length */
	l.srai	r4,r4,4

	l.lwz	r3,4(r5)	/* Load the real load/store insn */

	l.add	r5,r5,r4	/* Calculate jump target address */

	l.j	1f
	l.srli	r4,r3,26	/* Shift right to get the insn opcode */

jr:
	l.slli	r4,r3,9		/* Shift to get the reg nb */
	l.andi	r4,r4,0x7c

	l.lwz	r3,4(r5)	/* Load the real load/store insn */

	l.add	r4,r4,r1	/* Load the jump register value from the stack */
	l.lwz	r5,0(r4)

	l.srli	r4,r3,26	/* Shift right to get the insn opcode */


1:
//	l.mtspr	r0,r5,SPR_EPCR_BASE
	l.sw	PT_PC(r1),r5

	/* Dispatch on the load/store opcode to the per-insn emulation. */
	l.sfeqi	r4,0x26
	l.bf	lhs
	l.sfeqi	r4,0x25
	l.bf	lhz
	l.sfeqi	r4,0x22
	l.bf	lws
	l.sfeqi	r4,0x21
	l.bf	lwz
	l.sfeqi	r4,0x37
	l.bf	sh
	l.sfeqi	r4,0x35
	l.bf	sw
	l.nop

1:	l.j	1b		/* I don't know what to do */
	l.nop

/* Each emulation below assembles the value byte-by-byte, then decodes
 * the destination register field from the insn and writes the result
 * into that register's slot on the stack frame. */
lhs:	l.lbs	r5,0(r2)
	l.slli	r5,r5,8
	l.lbz	r6,1(r2)
	l.or	r5,r5,r6
	l.srli	r4,r3,19
	l.andi	r4,r4,0x7c
	l.add	r4,r4,r1
	l.j	align_end
	l.sw	0(r4),r5

lhz:	l.lbz	r5,0(r2)
	l.slli	r5,r5,8
	l.lbz	r6,1(r2)
	l.or	r5,r5,r6
	l.srli	r4,r3,19
	l.andi	r4,r4,0x7c
	l.add	r4,r4,r1
	l.j	align_end
	l.sw	0(r4),r5

lws:	l.lbs	r5,0(r2)
	l.slli	r5,r5,24
	l.lbz	r6,1(r2)
	l.slli	r6,r6,16
	l.or	r5,r5,r6
	l.lbz	r6,2(r2)
	l.slli	r6,r6,8
	l.or	r5,r5,r6
	l.lbz	r6,3(r2)
	l.or	r5,r5,r6
	l.srli	r4,r3,19
	l.andi	r4,r4,0x7c
	l.add	r4,r4,r1
	l.j	align_end
	l.sw	0(r4),r5

lwz:	l.lbz	r5,0(r2)
	l.slli	r5,r5,24
	l.lbz	r6,1(r2)
	l.slli	r6,r6,16
	l.or	r5,r5,r6
	l.lbz	r6,2(r2)
	l.slli	r6,r6,8
	l.or	r5,r5,r6
	l.lbz	r6,3(r2)
	l.or	r5,r5,r6
	l.srli	r4,r3,19
	l.andi	r4,r4,0x7c
	l.add	r4,r4,r1
	l.j	align_end
	l.sw	0(r4),r5

sh:
	l.srli	r4,r3,9
	l.andi	r4,r4,0x7c
	l.add	r4,r4,r1
	l.lwz	r5,0(r4)
	l.sb	1(r2),r5
	l.srli	r5,r5,8
	l.j	align_end
	l.sb	0(r2),r5

sw:
	l.srli	r4,r3,9
	l.andi	r4,r4,0x7c
	l.add	r4,r4,r1
	l.lwz	r5,0(r4)
	l.sb	3(r2),r5
	l.srli	r5,r5,8
	l.sb	2(r2),r5
	l.srli	r5,r5,8
	l.sb	1(r2),r5
	l.srli	r5,r5,8
	l.j	align_end
	l.sb	0(r2),r5

align_end:
	l.j	_ret_from_intr
	l.nop
#endif
| 461 | |||
/* ---[ 0x700: Illegal insn exception ]---------------------------------- */

/* Illegal instruction: call do_illegal_instruction(pt_regs); the
 * fault EA is already in r4. */
EXCEPTION_ENTRY(_illegal_instruction_handler)
	/* r4: EA of fault (set by EXCEPTION_HANDLE) */
	l.jal	do_illegal_instruction
	l.addi	r3,r1,0		/* delay slot: pt_regs */

	l.j	_ret_from_exception
	l.nop
| 471 | |||
/* ---[ 0x800: External interrupt exception ]---------------------------- */

/* External IRQ: dispatch to do_IRQ(pt_regs) via an indirect call, then
 * return along the interrupt path. */
EXCEPTION_ENTRY(_external_irq_handler)
#ifdef CONFIG_OPENRISC_ESR_EXCEPTION_BUG_CHECK
	/* Sanity check: an external IRQ should not be taken while the
	 * saved SR had IEE clear; if it was, printk a warning. */
	l.lwz	r4,PT_SR(r1)		// were interrupts enabled ?
	l.andi	r4,r4,SPR_SR_IEE
	l.sfeqi	r4,0
	l.bnf	1f			// ext irq enabled, all ok.
	l.nop

	l.addi	r1,r1,-0x8
	l.movhi	r3,hi(42f)
	l.ori	r3,r3,lo(42f)
	l.sw	0x0(r1),r3
	l.jal	printk
	l.sw	0x4(r1),r4		/* delay slot */
	l.addi	r1,r1,0x8

	.section .rodata, "a"
42:
		.string "\n\rESR interrupt bug: in _external_irq_handler (ESR %x)\n\r"
		.align 4
	.previous

	l.ori	r4,r4,SPR_SR_IEE	// fix the bug
	/* NOTE(review): the corrected SR is never written back -- the
	 * store below is commented out, so the l.ori above has no
	 * lasting effect.  Verify whether that is intentional. */
//	l.sw	PT_SR(r1),r4
1:
#endif
	l.addi	r3,r1,0			/* pt_regs */
	l.movhi	r8,hi(do_IRQ)
	l.ori	r8,r8,lo(do_IRQ)
	l.jalr	r8
	l.nop
	l.j	_ret_from_intr
	l.nop
| 507 | |||
/* ---[ 0x900: DTLB miss exception ]------------------------------------- */

/* (no body here -- TLB miss handlers presumably live in head.S; confirm) */

/* ---[ 0xa00: ITLB miss exception ]------------------------------------- */


/* ---[ 0xb00: Range exception ]----------------------------------------- */

/* Range exceptions are not handled: report via unhandled_exception(). */
UNHANDLED_EXCEPTION(_vector_0xb00,0xb00)
| 517 | |||
/* ---[ 0xc00: Syscall exception ]--------------------------------------- */

/*
 * Syscalls are a special type of exception in that they are
 * _explicitly_ invoked by userspace and can therefore be
 * held to conform to the same ABI as normal functions with
 * respect to whether registers are preserved across the call
 * or not.
 */

/* Upon syscall entry we just save the callee-saved registers
 * and not the call-clobbered ones.
 */

/* printk format used by the (normally #if 0'd) _syscall_debug code
 * further down. */
_string_syscall_return:
	.string "syscall return %ld \n\r\0"
	.align 4
| 535 | |||
/*
 * Syscall entry: r11 holds the syscall number.  Saves only what the
 * ABI requires plus the syscall args (for restart), dispatches through
 * sys_call_table, handles ptrace tracing and pending-work, and returns
 * to userspace via a minimal "hot path" register restore.
 */
ENTRY(_sys_call_handler)
	/* syscalls run with interrupts enabled */
	ENABLE_INTERRUPTS(r29)		// enable interrupts, r29 is temp

	/* r1, EPCR, ESR are already saved */
	l.sw	PT_GPR2(r1),r2
	/* r3-r8 must be saved because syscall restart relies
	 * on us being able to restart the syscall args... technically
	 * they should be clobbered, otherwise
	 */
	l.sw	PT_GPR3(r1),r3
	/* r4 already saved */
	/* r4 holds the EEAR address of the fault, load the original r4 */
	l.lwz	r4,PT_GPR4(r1)
	l.sw	PT_GPR5(r1),r5
	l.sw	PT_GPR6(r1),r6
	l.sw	PT_GPR7(r1),r7
	l.sw	PT_GPR8(r1),r8
	l.sw	PT_GPR9(r1),r9
	/* r10 already saved */
	l.sw	PT_GPR11(r1),r11
	/* r11 also stashed in orig_gpr11 -- presumably for syscall-restart
	 * bookkeeping; verify against do_signal */
	l.sw	PT_ORIG_GPR11(r1),r11
	/* r12,r13 already saved */

	/* r14-r28 (even) aren't touched by the syscall fast path below
	 * so we don't need to save them.  However, the functions that return
	 * to userspace via a call to switch() DO need to save these because
	 * switch() effectively clobbers them... saving these registers for
	 * such functions is handled in their syscall wrappers (see fork, vfork,
	 * and clone, below).
	 * NOTE(review): this comment was previously missing its closing
	 * marker, which made the following comment lines part of it.
	 */

	/* r30 is the only register we clobber in the fast path */
	/* r30 already saved */
	/*	l.sw	PT_GPR30(r1),r30 */
	/* This is used by do_signal to determine whether to check for
	 * syscall restart or not */
	l.sw	PT_SYSCALLNO(r1),r11

_syscall_check_trace_enter:
	/* If TIF_SYSCALL_TRACE is set, then we want to do syscall tracing */
	l.lwz	r30,TI_FLAGS(r10)
	l.andi	r30,r30,_TIF_SYSCALL_TRACE
	l.sfne	r30,r0
	l.bf	_syscall_trace_enter
	l.nop

_syscall_check:
	/* Ensure that the syscall number is reasonable */
	l.sfgeui r11,__NR_syscalls
	l.bf	_syscall_badsys
	l.nop

_syscall_call:
	/* Index sys_call_table by syscall number (<<2 for word-sized
	 * entries) and make the indirect call. */
	l.movhi	r29,hi(sys_call_table)
	l.ori	r29,r29,lo(sys_call_table)
	l.slli	r11,r11,2
	l.add	r29,r29,r11
	l.lwz	r29,0(r29)

	l.jalr	r29
	l.nop

_syscall_return:
	/* All syscalls return here... just pay attention to ret_from_fork
	 * which does it in a round-about way.
	 */
	l.sw	PT_GPR11(r1),r11	// save return value

#if 0
_syscall_debug:
	l.movhi	r3,hi(_string_syscall_return)
	l.ori	r3,r3,lo(_string_syscall_return)
	l.ori	r27,r0,1
	l.sw	-4(r1),r27
	l.sw	-8(r1),r11
	l.addi	r1,r1,-8
	l.movhi	r27,hi(printk)
	l.ori	r27,r27,lo(printk)
	l.jalr	r27
	l.nop
	l.addi	r1,r1,8
#endif

_syscall_check_trace_leave:
	/* r30 is a callee-saved register so this should still hold the
	 * _TIF_SYSCALL_TRACE flag from _syscall_check_trace_enter above...
	 * _syscall_trace_leave expects syscall result to be in pt_regs->r11.
	 */
	l.sfne	r30,r0
	l.bf	_syscall_trace_leave
	l.nop

/* This is where the exception-return code begins... interrupts need to be
 * disabled the rest of the way here because we can't afford to miss any
 * interrupts that set NEED_RESCHED or SIGNALPENDING... really true? */

_syscall_check_work:
	/* Here we need to disable interrupts */
	DISABLE_INTERRUPTS(r27,r29)
	l.lwz	r30,TI_FLAGS(r10)
	l.andi	r30,r30,_TIF_WORK_MASK
	l.sfne	r30,r0

	l.bnf	_syscall_resume_userspace
	l.nop

	/* Work pending follows a different return path, so we need to
	 * make sure that all the call-saved registers get into pt_regs
	 * before branching...
	 */
	l.sw	PT_GPR14(r1),r14
	l.sw	PT_GPR16(r1),r16
	l.sw	PT_GPR18(r1),r18
	l.sw	PT_GPR20(r1),r20
	l.sw	PT_GPR22(r1),r22
	l.sw	PT_GPR24(r1),r24
	l.sw	PT_GPR26(r1),r26
	l.sw	PT_GPR28(r1),r28

	/* _work_pending needs to be called with interrupts disabled */
	l.j	_work_pending
	l.nop

_syscall_resume_userspace:
//	ENABLE_INTERRUPTS(r29)


/* This is the hot path for returning to userspace from a syscall. If there's
 * work to be done and the branch to _work_pending was taken above, then the
 * return to userspace will be done via the normal exception return path...
 * that path restores _all_ registers and will overwrite the "clobbered"
 * registers with whatever garbage is in pt_regs -- that's OK because those
 * registers are clobbered anyway and because the extra work is insignificant
 * in the context of the extra work that _work_pending is doing.
 * NOTE(review): this comment was also previously unterminated.
 */

/* Once again, syscalls are special and only guarantee to preserve the
 * same registers as a normal function call */

/* The assumption here is that the registers r14-r28 (even) are untouched and
 * don't need to be restored... be sure that that's really the case!
 */

/* This is still too much... we should only be restoring what we actually
 * clobbered... we should even be using 'scratch' (odd) regs above so that
 * we don't need to restore anything, hardly...
 */

	l.lwz	r2,PT_GPR2(r1)

	/* Restore args */
	/* r3-r8 are technically clobbered, but syscall restart needs these
	 * to be restored...
	 */
	l.lwz	r3,PT_GPR3(r1)
	l.lwz	r4,PT_GPR4(r1)
	l.lwz	r5,PT_GPR5(r1)
	l.lwz	r6,PT_GPR6(r1)
	l.lwz	r7,PT_GPR7(r1)
	l.lwz	r8,PT_GPR8(r1)

	l.lwz	r9,PT_GPR9(r1)
	l.lwz	r10,PT_GPR10(r1)
	l.lwz	r11,PT_GPR11(r1)

	/* r30 is the only register we clobber in the fast path */
	l.lwz	r30,PT_GPR30(r1)

	/* Here we use r13-r19 (odd) as scratch regs */
	l.lwz	r13,PT_PC(r1)
	l.lwz	r15,PT_SR(r1)
	l.lwz	r1,PT_SP(r1)
	/* Interrupts need to be disabled for setting EPCR and ESR
	 * so that another interrupt doesn't come in here and clobber
	 * them before we can use them for our l.rfe */
	DISABLE_INTERRUPTS(r17,r19)
	l.mtspr	r0,r13,SPR_EPCR_BASE
	l.mtspr	r0,r15,SPR_ESR_BASE
	l.rfe

/* End of hot path!
 * Keep the below tracing and error handling out of the hot path...
*/

_syscall_trace_enter:
	/* Here we pass pt_regs to do_syscall_trace_enter.  Make sure
	 * that function is really getting all the info it needs as
	 * pt_regs isn't a complete set of userspace regs, just the
	 * ones relevant to the syscall...
	 *
	 * Note use of delay slot for setting argument.
	 */
	l.jal	do_syscall_trace_enter
	l.addi	r3,r1,0

	/* Restore arguments (not preserved across do_syscall_trace_enter)
	 * so that we can do the syscall for real and return to the syscall
	 * hot path.
	 */
	l.lwz	r11,PT_SYSCALLNO(r1)
	l.lwz	r3,PT_GPR3(r1)
	l.lwz	r4,PT_GPR4(r1)
	l.lwz	r5,PT_GPR5(r1)
	l.lwz	r6,PT_GPR6(r1)
	l.lwz	r7,PT_GPR7(r1)

	l.j	_syscall_check
	l.lwz	r8,PT_GPR8(r1)

_syscall_trace_leave:
	l.jal	do_syscall_trace_leave
	l.addi	r3,r1,0		/* delay slot: pt_regs */

	l.j	_syscall_check_work
	l.nop

_syscall_badsys:
	/* Here we effectively pretend to have executed an imaginary
	 * syscall that returns -ENOSYS and then return to the regular
	 * syscall hot path.
	 * Note that "return value" is set in the delay slot...
	 */
	l.j	_syscall_return
	l.addi	r11,r0,-ENOSYS

/******* END SYSCALL HANDLING *******/
| 761 | |||
/* ---[ 0xd00: Trap exception ]------------------------------------------ */
/* NOTE(review): the OR1K architecture manual assigns 0xd00 to the
 * floating-point exception, not trap -- confirm the heading; the
 * vector is unhandled either way. */

UNHANDLED_EXCEPTION(_vector_0xd00,0xd00)

/* ---[ 0xe00: Trap exception ]------------------------------------------ */

/* Trap (e.g. l.trap): call do_trap(pt_regs); the EA is already in r4. */
EXCEPTION_ENTRY(_trap_handler)
	/* r4: EA of fault (set by EXCEPTION_HANDLE) */
	l.jal	do_trap
	l.addi	r3,r1,0		/* delay slot: pt_regs */

	l.j	_ret_from_exception
	l.nop
| 775 | |||
/* Vectors 0xf00-0x1f00 are reserved/unused: each just reports via
 * unhandled_exception() with its vector number. */

/* ---[ 0xf00: Reserved exception ]-------------------------------------- */

UNHANDLED_EXCEPTION(_vector_0xf00,0xf00)

/* ---[ 0x1000: Reserved exception ]------------------------------------- */

UNHANDLED_EXCEPTION(_vector_0x1000,0x1000)

/* ---[ 0x1100: Reserved exception ]------------------------------------- */

UNHANDLED_EXCEPTION(_vector_0x1100,0x1100)

/* ---[ 0x1200: Reserved exception ]------------------------------------- */

UNHANDLED_EXCEPTION(_vector_0x1200,0x1200)

/* ---[ 0x1300: Reserved exception ]------------------------------------- */

UNHANDLED_EXCEPTION(_vector_0x1300,0x1300)

/* ---[ 0x1400: Reserved exception ]------------------------------------- */

UNHANDLED_EXCEPTION(_vector_0x1400,0x1400)

/* ---[ 0x1500: Reserved exception ]------------------------------------- */

UNHANDLED_EXCEPTION(_vector_0x1500,0x1500)

/* ---[ 0x1600: Reserved exception ]------------------------------------- */

UNHANDLED_EXCEPTION(_vector_0x1600,0x1600)

/* ---[ 0x1700: Reserved exception ]------------------------------------- */

UNHANDLED_EXCEPTION(_vector_0x1700,0x1700)

/* ---[ 0x1800: Reserved exception ]------------------------------------- */

UNHANDLED_EXCEPTION(_vector_0x1800,0x1800)

/* ---[ 0x1900: Reserved exception ]------------------------------------- */

UNHANDLED_EXCEPTION(_vector_0x1900,0x1900)

/* ---[ 0x1a00: Reserved exception ]------------------------------------- */

UNHANDLED_EXCEPTION(_vector_0x1a00,0x1a00)

/* ---[ 0x1b00: Reserved exception ]------------------------------------- */

UNHANDLED_EXCEPTION(_vector_0x1b00,0x1b00)

/* ---[ 0x1c00: Reserved exception ]------------------------------------- */

UNHANDLED_EXCEPTION(_vector_0x1c00,0x1c00)

/* ---[ 0x1d00: Reserved exception ]------------------------------------- */

UNHANDLED_EXCEPTION(_vector_0x1d00,0x1d00)

/* ---[ 0x1e00: Reserved exception ]------------------------------------- */

UNHANDLED_EXCEPTION(_vector_0x1e00,0x1e00)

/* ---[ 0x1f00: Reserved exception ]------------------------------------- */

UNHANDLED_EXCEPTION(_vector_0x1f00,0x1f00)
| 843 | |||
| 844 | /* ========================================================[ return ] === */ | ||
| 845 | |||
| 846 | _work_pending: | ||
| 847 | /* | ||
| 848 | * if (current_thread_info->flags & _TIF_NEED_RESCHED) | ||
| 849 | * schedule(); | ||
| 850 | */ | ||
| | /* r10 holds current_thread_info throughout this file; fetch its flags word */ | ||
| 851 | l.lwz r5,TI_FLAGS(r10) | ||
| 852 | l.andi r3,r5,_TIF_NEED_RESCHED | ||
| 853 | l.sfnei r3,0 | ||
| | /* flag clear => NEED_RESCHED not set: skip straight to signal handling */ | ||
| 854 | l.bnf _work_notifysig | ||
| 855 | l.nop | ||
| 856 | l.jal schedule | ||
| 857 | l.nop | ||
| | /* after rescheduling, re-check all TIF work bits before going back to user */ | ||
| 858 | l.j _resume_userspace | ||
| 859 | l.nop | ||
| 860 | |||
| 861 | /* Handle pending signals and notify-resume requests. | ||
| 862 | * do_notify_resume must be passed the latest pushed pt_regs, not | ||
| 863 | * necessarily the "userspace" ones. Also, pt_regs->syscallno | ||
| 864 | * must be set so that the syscall restart functionality works. | ||
| 865 | */ | ||
| 866 | _work_notifysig: | ||
| 867 | l.jal do_notify_resume | ||
| 868 | l.ori r3,r1,0 /* pt_regs */ | ||
| | /* the delay slot above executes before the call lands: r3 = r1, i.e. the | ||
| | * kernel SP doubles as the pt_regs pointer. On return we fall through | ||
| | * into _resume_userspace to re-check the work flags. */ | ||
| 869 | |||
| 870 | _resume_userspace: | ||
| | /* Interrupts must be off while testing the TIF work bits so that no new | ||
| | * work can be posted between this check and the final return to user. */ | ||
| 871 | DISABLE_INTERRUPTS(r3,r4) | ||
| 872 | l.lwz r3,TI_FLAGS(r10) | ||
| 873 | l.andi r3,r3,_TIF_WORK_MASK | ||
| 874 | l.sfnei r3,0 | ||
| | /* loop back while any work bit remains set; else fall into _restore_all */ | ||
| 875 | l.bf _work_pending | ||
| 876 | l.nop | ||
| 877 | |||
| 878 | _restore_all: | ||
| | /* NOTE(review): RESTORE_ALL is defined earlier in this file (outside this | ||
| | * view); presumably it unwinds the pt_regs frame and exits via l.rfe -- | ||
| | * confirm against the macro definition. */ | ||
| 879 | RESTORE_ALL | ||
| 880 | /* This returns to userspace code */ | ||
| | /* NOTE(review): _ret_from_exception also branches here for kernel-mode | ||
| | * returns, so "userspace" above is not strictly accurate. */ | ||
| 881 | |||
| 882 | |||
| 883 | ENTRY(_ret_from_intr) | ||
| 884 | ENTRY(_ret_from_exception) | ||
| | /* Examine the SR we are about to restore: if its supervisor-mode bit is | ||
| | * set we are returning into the kernel and can restore immediately; | ||
| | * otherwise handle any pending user-return work first. */ | ||
| 885 | l.lwz r4,PT_SR(r1) | ||
| 886 | l.andi r3,r4,SPR_SR_SM | ||
| 887 | l.sfeqi r3,0 | ||
| | /* flag not set => SM was set => kernel-mode return */ | ||
| 888 | l.bnf _restore_all | ||
| 889 | l.nop | ||
| 890 | l.j _resume_userspace | ||
| 891 | l.nop | ||
| 892 | |||
| 893 | ENTRY(ret_from_fork) | ||
| | /* First schedule of the forked child: let the scheduler finish the | ||
| | * context-switch bookkeeping before anything else runs. */ | ||
| 894 | l.jal schedule_tail | ||
| 895 | l.nop | ||
| 896 | |||
| 897 | /* _syscall_return expects r11 to contain the return value */ | ||
| 898 | l.lwz r11,PT_GPR11(r1) | ||
| 899 | |||
| 900 | /* The syscall fast path return expects call-saved registers | ||
| 901 | * r12-r28 to be untouched, so we restore them here as they | ||
| 902 | * will have been effectively clobbered when arriving here | ||
| 903 | * via the call to _switch | ||
| 904 | */ | ||
| 905 | l.lwz r12,PT_GPR12(r1) | ||
| 906 | l.lwz r14,PT_GPR14(r1) | ||
| 907 | l.lwz r16,PT_GPR16(r1) | ||
| 908 | l.lwz r18,PT_GPR18(r1) | ||
| 909 | l.lwz r20,PT_GPR20(r1) | ||
| 910 | l.lwz r22,PT_GPR22(r1) | ||
| 911 | l.lwz r24,PT_GPR24(r1) | ||
| 912 | l.lwz r26,PT_GPR26(r1) | ||
| 913 | l.lwz r28,PT_GPR28(r1) | ||
| 914 | |||
| 915 | l.j _syscall_return | ||
| 916 | l.nop | ||
| 917 | |||
| 918 | /* Since syscalls don't save call-clobbered registers, the args to | ||
| 919 | * kernel_thread_helper will need to be passed through callee-saved | ||
| 920 | * registers and copied to the parameter registers when the thread | ||
| 921 | * begins running. | ||
| 922 | * | ||
| 923 | * See arch/openrisc/kernel/process.c: | ||
| 924 | * The args are passed as follows: | ||
| 925 | * arg1 (r3) : passed in r20 | ||
| 926 | * arg2 (r4) : passed in r22 | ||
| 927 | */ | ||
| 928 | |||
| 929 | ENTRY(_kernel_thread_helper) | ||
| | /* copy the stashed args into the argument registers */ | ||
| 930 | l.or r3,r20,r0 | ||
| 931 | l.or r4,r22,r0 | ||
| | /* build the full 32-bit address of kernel_thread_helper and jump to it */ | ||
| 932 | l.movhi r31,hi(kernel_thread_helper) | ||
| 933 | l.ori r31,r31,lo(kernel_thread_helper) | ||
| 934 | l.jr r31 | ||
| 935 | l.nop | ||
| 936 | |||
| 937 | |||
| 938 | /* ========================================================[ switch ] === */ | ||
| 939 | |||
| 940 | /* | ||
| 941 | * This routine switches between two different tasks. The process | ||
| 942 | * state of one is saved on its kernel stack. Then the state | ||
| 943 | * of the other is restored from its kernel stack. The memory | ||
| 944 | * management hardware is updated to the second process's state. | ||
| 945 | * Finally, we can return to the second process, via the 'return'. | ||
| 946 | * | ||
| 947 | * Note: there are two ways to get to the "going out" portion | ||
| 948 | * of this code; either by coming in via the entry (_switch) | ||
| 949 | * or via "fork" which must set up an environment equivalent | ||
| 950 | * to the "_switch" path. If you change this (or in particular, the | ||
| 951 | * SAVE_REGS macro), you'll have to change the fork code also. | ||
| 952 | */ | ||
| 953 | |||
| 954 | |||
| 955 | /* _switch MUST never lie on a page boundary, because it runs from | ||
| 956 | * effective addresses and being interrupted by an iTLB miss would kill it. | ||
| 957 | * A dTLB miss seems never to occur in the bad place, since data accesses | ||
| 958 | * are to task structures, which are always page aligned. | ||
| 959 | * | ||
| 960 | * The problem happens in RESTORE_ALL_NO_R11, where we first set the EPCR | ||
| 961 | * register, then load the previous register values and only at the end call | ||
| 962 | * the l.rfe instruction. If we get a TLB miss in between, the EPCR register | ||
| 963 | * gets garbled and we end up calling l.rfe with the wrong EPCR. (The same | ||
| 964 | * probably holds for ESR.) | ||
| 965 | * | ||
| 966 | * To avoid this problem it is sufficient to align _switch to | ||
| 967 | * some nice round number no smaller than its size... | ||
| 968 | */ | ||
| 969 | |||
| 970 | /* ABI rules apply here... we either enter _switch via schedule() or via | ||
| 971 | * an imaginary call to which we shall return at return_from_fork. Either | ||
| 972 | * way, we are a function call and only need to preserve the callee-saved | ||
| 973 | * registers when we return. As such, we don't need to save the registers | ||
| 974 | * on the stack that we won't be returning as they were... | ||
| 975 | */ | ||
| 976 | |||
| 977 | .align 0x400 | ||
| 978 | ENTRY(_switch) | ||
| | /* On entry: r10 = current (old) thread_info, r4 = new task's thread_info, | ||
| | * r9 = return address. On exit: r11 = old thread_info ('last'). */ | ||
| 979 | /* We don't store SR as _switch only gets called in a context where | ||
| 980 | * the SR will be the same going in and coming out... */ | ||
| 981 | |||
| 982 | /* Set up new pt_regs struct for saving task state */ | ||
| 983 | l.addi r1,r1,-(INT_FRAME_SIZE) | ||
| 984 | |||
| 985 | /* No need to store r1/PT_SP as it goes into KSP below */ | ||
| 986 | l.sw PT_GPR2(r1),r2 | ||
| 987 | l.sw PT_GPR9(r1),r9 | ||
| 988 | /* This is wrong, r12 shouldn't be here... but GCC is broken for the time being | ||
| 989 | * and expects r12 to be callee-saved... */ | ||
| 990 | l.sw PT_GPR12(r1),r12 | ||
| 991 | l.sw PT_GPR14(r1),r14 | ||
| 992 | l.sw PT_GPR16(r1),r16 | ||
| 993 | l.sw PT_GPR18(r1),r18 | ||
| 994 | l.sw PT_GPR20(r1),r20 | ||
| 995 | l.sw PT_GPR22(r1),r22 | ||
| 996 | l.sw PT_GPR24(r1),r24 | ||
| 997 | l.sw PT_GPR26(r1),r26 | ||
| 998 | l.sw PT_GPR28(r1),r28 | ||
| 999 | l.sw PT_GPR30(r1),r30 | ||
| 1000 | |||
| 1001 | l.addi r11,r10,0 /* Save old 'current' to 'last' return value*/ | ||
| 1002 | |||
| 1003 | /* We use thread_info->ksp for storing the address of the above | ||
| 1004 | * structure so that we can get back to it later... we don't want | ||
| 1005 | * to lose the value of thread_info->ksp, though, so store it as | ||
| 1006 | * pt_regs->sp so that we can easily restore it when we are made | ||
| 1007 | * live again... | ||
| 1008 | */ | ||
| 1009 | |||
| 1010 | /* Save the old value of thread_info->ksp as pt_regs->sp */ | ||
| 1011 | l.lwz r29,TI_KSP(r10) | ||
| 1012 | l.sw PT_SP(r1),r29 | ||
| 1013 | |||
| 1014 | /* Swap kernel stack pointers */ | ||
| 1015 | l.sw TI_KSP(r10),r1 /* Save old stack pointer */ | ||
| 1016 | l.or r10,r4,r0 /* Set up new current_thread_info */ | ||
| 1017 | l.lwz r1,TI_KSP(r10) /* Load new stack pointer */ | ||
| 1018 | |||
| 1019 | /* Restore the old value of thread_info->ksp */ | ||
| 1020 | l.lwz r29,PT_SP(r1) | ||
| 1021 | l.sw TI_KSP(r10),r29 | ||
| 1022 | |||
| 1023 | /* ...and restore the registers, except r11 because the return value | ||
| 1024 | * has already been set above. | ||
| 1025 | */ | ||
| 1026 | l.lwz r2,PT_GPR2(r1) | ||
| 1027 | l.lwz r9,PT_GPR9(r1) | ||
| 1028 | /* No need to restore r10 */ | ||
| 1029 | /* ...and do not restore r11 */ | ||
| 1030 | |||
| 1031 | /* This is wrong, r12 shouldn't be here... but GCC is broken for the time being | ||
| 1032 | * and expects r12 to be callee-saved... */ | ||
| 1033 | l.lwz r12,PT_GPR12(r1) | ||
| 1034 | l.lwz r14,PT_GPR14(r1) | ||
| 1035 | l.lwz r16,PT_GPR16(r1) | ||
| 1036 | l.lwz r18,PT_GPR18(r1) | ||
| 1037 | l.lwz r20,PT_GPR20(r1) | ||
| 1038 | l.lwz r22,PT_GPR22(r1) | ||
| 1039 | l.lwz r24,PT_GPR24(r1) | ||
| 1040 | l.lwz r26,PT_GPR26(r1) | ||
| 1041 | l.lwz r28,PT_GPR28(r1) | ||
| 1042 | l.lwz r30,PT_GPR30(r1) | ||
| 1043 | |||
| 1044 | /* Unwind stack to pre-switch state */ | ||
| 1045 | l.addi r1,r1,(INT_FRAME_SIZE) | ||
| 1046 | |||
| 1047 | /* Return via the link-register back to where we 'came from', where that can be | ||
| 1048 | * either schedule() or return_from_fork()... */ | ||
| 1049 | l.jr r9 | ||
| 1050 | l.nop | ||
| 1051 | |||
| 1052 | /* ==================================================================== */ | ||
| 1053 | |||
| 1054 | /* These all use the delay slot for setting the argument register, so the | ||
| 1055 | * jump is always happening after the l.addi instruction. | ||
| 1056 | * | ||
| 1057 | * These are all just wrappers that don't touch the link-register r9, so the | ||
| 1058 | * return from the "real" syscall function will return back to the syscall | ||
| 1059 | * code that did the l.jal that brought us here. | ||
| 1060 | */ | ||
| 1061 | |||
| 1062 | /* fork requires that we save all the callee-saved registers because they | ||
| 1063 | * are all effectively clobbered by the call to _switch. Here we store | ||
| 1064 | * all the registers that aren't touched by the syscall fast path and thus | ||
| 1065 | * weren't saved there. | ||
| 1066 | */ | ||
| 1067 | |||
| 1068 | _fork_save_extra_regs_and_call: | ||
| | /* Callers put the real handler's address in r29 and set the pt_regs | ||
| | * argument in their delay slot; the final store below rides ours. */ | ||
| 1069 | l.sw PT_GPR14(r1),r14 | ||
| 1070 | l.sw PT_GPR16(r1),r16 | ||
| 1071 | l.sw PT_GPR18(r1),r18 | ||
| 1072 | l.sw PT_GPR20(r1),r20 | ||
| 1073 | l.sw PT_GPR22(r1),r22 | ||
| 1074 | l.sw PT_GPR24(r1),r24 | ||
| 1075 | l.sw PT_GPR26(r1),r26 | ||
| 1076 | l.jr r29 | ||
| 1077 | l.sw PT_GPR28(r1),r28 | ||
| 1078 | |||
| 1079 | ENTRY(sys_clone) | ||
| | /* tail-call _sys_clone through the save helper; delay slot: r7 = pt_regs */ | ||
| 1080 | l.movhi r29,hi(_sys_clone) | ||
| 1081 | l.ori r29,r29,lo(_sys_clone) | ||
| 1082 | l.j _fork_save_extra_regs_and_call | ||
| 1083 | l.addi r7,r1,0 | ||
| 1084 | |||
| 1085 | ENTRY(sys_fork) | ||
| | /* tail-call _sys_fork through the save helper; delay slot: r3 = pt_regs */ | ||
| 1086 | l.movhi r29,hi(_sys_fork) | ||
| 1087 | l.ori r29,r29,lo(_sys_fork) | ||
| 1088 | l.j _fork_save_extra_regs_and_call | ||
| 1089 | l.addi r3,r1,0 | ||
| 1090 | |||
| 1091 | ENTRY(sys_execve) | ||
| | /* delay slot passes pt_regs (r1) as the extra argument in r6 */ | ||
| 1092 | l.j _sys_execve | ||
| 1093 | l.addi r6,r1,0 | ||
| 1094 | |||
| 1095 | ENTRY(sys_sigaltstack) | ||
| | /* delay slot passes pt_regs (r1) as the extra argument in r5 */ | ||
| 1096 | l.j _sys_sigaltstack | ||
| 1097 | l.addi r5,r1,0 | ||
| 1098 | |||
| 1099 | ENTRY(sys_rt_sigreturn) | ||
| | /* delay slot passes pt_regs (r1) as the argument in r3 */ | ||
| 1100 | l.j _sys_rt_sigreturn | ||
| 1101 | l.addi r3,r1,0 | ||
| 1102 | |||
| 1103 | /* This is a catch-all syscall for atomic instructions for the OpenRISC 1000. | ||
| 1104 | * The function takes a variable number of parameters depending on which | ||
| 1105 | * particular flavour of atomic you want... parameter 1 is a flag identifying | ||
| 1106 | * the atomic in question. Currently, this function implements the | ||
| 1107 | * following variants: | ||
| 1108 | * | ||
| 1109 | * XCHG: | ||
| 1110 | * @flag: 1 | ||
| 1111 | * @ptr1: | ||
| 1112 | * @ptr2: | ||
| 1113 | * Atomically exchange the values in pointers 1 and 2. | ||
| 1114 | * | ||
| 1115 | */ | ||
| 1116 | |||
| 1117 | ENTRY(sys_or1k_atomic) | ||
| 1118 | /* FIXME: This ignores r3 and always does an XCHG */ | ||
| | /* Atomicity is provided by masking local interrupts around the swap. | ||
| | * NOTE(review): interrupt masking does not exclude other CPUs -- | ||
| | * presumably acceptable on a uniprocessor port; confirm before SMP. */ | ||
| 1119 | DISABLE_INTERRUPTS(r17,r19) | ||
| | /* swap *r4 and *r5 */ | ||
| 1120 | l.lwz r30,0(r4) | ||
| 1121 | l.lwz r28,0(r5) | ||
| 1122 | l.sw 0(r4),r28 | ||
| 1123 | l.sw 0(r5),r30 | ||
| 1124 | ENABLE_INTERRUPTS(r17) | ||
| | /* return 0: r11 is cleared in the delay slot */ | ||
| 1125 | l.jr r9 | ||
| 1126 | l.or r11,r0,r0 | ||
| 1127 | |||
| 1128 | /* ============================================================[ EOF ]=== */ | ||
diff --git a/arch/openrisc/kernel/head.S b/arch/openrisc/kernel/head.S new file mode 100644 index 000000000000..c75018d22644 --- /dev/null +++ b/arch/openrisc/kernel/head.S | |||
| @@ -0,0 +1,1607 @@ | |||
| 1 | /* | ||
| 2 | * OpenRISC head.S | ||
| 3 | * | ||
| 4 | * Linux architectural port borrowing liberally from similar works of | ||
| 5 | * others. All original copyrights apply as per the original source | ||
| 6 | * declaration. | ||
| 7 | * | ||
| 8 | * Modifications for the OpenRISC architecture: | ||
| 9 | * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> | ||
| 10 | * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> | ||
| 11 | * | ||
| 12 | * This program is free software; you can redistribute it and/or | ||
| 13 | * modify it under the terms of the GNU General Public License | ||
| 14 | * as published by the Free Software Foundation; either version | ||
| 15 | * 2 of the License, or (at your option) any later version. | ||
| 16 | */ | ||
| 17 | |||
| 18 | #include <linux/linkage.h> | ||
| 19 | #include <linux/threads.h> | ||
| 20 | #include <linux/errno.h> | ||
| 21 | #include <linux/init.h> | ||
| 22 | #include <asm/processor.h> | ||
| 23 | #include <asm/page.h> | ||
| 24 | #include <asm/mmu.h> | ||
| 25 | #include <asm/pgtable.h> | ||
| 26 | #include <asm/cache.h> | ||
| 27 | #include <asm/spr_defs.h> | ||
| 28 | #include <asm/asm-offsets.h> | ||
| 29 | |||
| | /* tophys(rd,rs): translate a kernel virtual address to physical by adding */ | ||
| | /* -KERNELBASE. NOTE(review): only hi(-KERNELBASE) is applied, so this */ | ||
| | /* assumes the low 16 bits of KERNELBASE are zero -- confirm. */ | ||
| 30 | #define tophys(rd,rs) \ | ||
| 31 | l.movhi rd,hi(-KERNELBASE) ;\ | ||
| 32 | l.add rd,rd,rs | ||
| 33 | |||
| | /* CLEAR_GPR(gpr): gpr = 0 (r0 is the hardwired zero register) */ | ||
| 34 | #define CLEAR_GPR(gpr) \ | ||
| 35 | l.or gpr,r0,r0 | ||
| 36 | |||
| | /* LOAD_SYMBOL_2_GPR(gpr,symbol): load a full 32-bit symbol address (hi/lo) */ | ||
| 37 | #define LOAD_SYMBOL_2_GPR(gpr,symbol) \ | ||
| 38 | l.movhi gpr,hi(symbol) ;\ | ||
| 39 | l.ori gpr,gpr,lo(symbol) | ||
| 40 | |||
| 41 | |||
| | /* physical base of the debug UART -- presumably used by _emergency_print */ | ||
| 42 | #define UART_BASE_ADD 0x90000000 | ||
| 43 | |||
| | /* SR on entry to exception handlers: MMUs and caches on, supervisor mode; */ | ||
| | /* IEE/TEE left clear, so interrupts and the tick timer stay masked. */ | ||
| 44 | #define EXCEPTION_SR (SPR_SR_DME | SPR_SR_IME | SPR_SR_DCE | SPR_SR_ICE | SPR_SR_SM) | ||
| | /* SR for the syscall path: as above, plus interrupts (IEE) and timer (TEE) */ | ||
| 45 | #define SYSCALL_SR (SPR_SR_DME | SPR_SR_IME | SPR_SR_DCE | SPR_SR_ICE | SPR_SR_IEE | SPR_SR_TEE | SPR_SR_SM) | ||
| 46 | |||
| 47 | /* ============================================[ tmp store locations ]=== */ | ||
| | /* Fixed scratch slots at low physical addresses (small offsets off r0): */ | ||
| | /* exception entry has no usable stack yet, so registers are spilled here. */ | ||
| 48 | |||
| 49 | /* | ||
| 50 | * emergency_print temporary stores | ||
| 51 | */ | ||
| 52 | #define EMERGENCY_PRINT_STORE_GPR4 l.sw 0x20(r0),r4 | ||
| 53 | #define EMERGENCY_PRINT_LOAD_GPR4 l.lwz r4,0x20(r0) | ||
| 54 | |||
| 55 | #define EMERGENCY_PRINT_STORE_GPR5 l.sw 0x24(r0),r5 | ||
| 56 | #define EMERGENCY_PRINT_LOAD_GPR5 l.lwz r5,0x24(r0) | ||
| 57 | |||
| 58 | #define EMERGENCY_PRINT_STORE_GPR6 l.sw 0x28(r0),r6 | ||
| 59 | #define EMERGENCY_PRINT_LOAD_GPR6 l.lwz r6,0x28(r0) | ||
| 60 | |||
| 61 | #define EMERGENCY_PRINT_STORE_GPR7 l.sw 0x2c(r0),r7 | ||
| 62 | #define EMERGENCY_PRINT_LOAD_GPR7 l.lwz r7,0x2c(r0) | ||
| 63 | |||
| 64 | #define EMERGENCY_PRINT_STORE_GPR8 l.sw 0x30(r0),r8 | ||
| 65 | #define EMERGENCY_PRINT_LOAD_GPR8 l.lwz r8,0x30(r0) | ||
| 66 | |||
| 67 | #define EMERGENCY_PRINT_STORE_GPR9 l.sw 0x34(r0),r9 | ||
| 68 | #define EMERGENCY_PRINT_LOAD_GPR9 l.lwz r9,0x34(r0) | ||
| 69 | |||
| 70 | |||
| 71 | /* | ||
| 72 | * TLB miss handlers' temporary stores | ||
| 73 | */ | ||
| 74 | #define EXCEPTION_STORE_GPR9 l.sw 0x10(r0),r9 | ||
| 75 | #define EXCEPTION_LOAD_GPR9 l.lwz r9,0x10(r0) | ||
| 76 | |||
| 77 | #define EXCEPTION_STORE_GPR2 l.sw 0x64(r0),r2 | ||
| 78 | #define EXCEPTION_LOAD_GPR2 l.lwz r2,0x64(r0) | ||
| 79 | |||
| 80 | #define EXCEPTION_STORE_GPR3 l.sw 0x68(r0),r3 | ||
| 81 | #define EXCEPTION_LOAD_GPR3 l.lwz r3,0x68(r0) | ||
| 82 | |||
| 83 | #define EXCEPTION_STORE_GPR4 l.sw 0x6c(r0),r4 | ||
| 84 | #define EXCEPTION_LOAD_GPR4 l.lwz r4,0x6c(r0) | ||
| 85 | |||
| 86 | #define EXCEPTION_STORE_GPR5 l.sw 0x70(r0),r5 | ||
| 87 | #define EXCEPTION_LOAD_GPR5 l.lwz r5,0x70(r0) | ||
| 88 | |||
| 89 | #define EXCEPTION_STORE_GPR6 l.sw 0x74(r0),r6 | ||
| 90 | #define EXCEPTION_LOAD_GPR6 l.lwz r6,0x74(r0) | ||
| 91 | |||
| 92 | |||
| 93 | /* | ||
| 94 | * EXCEPTION_HANDLE temporary stores | ||
| 95 | */ | ||
| 96 | |||
| 97 | #define EXCEPTION_T_STORE_GPR30 l.sw 0x78(r0),r30 | ||
| 98 | #define EXCEPTION_T_LOAD_GPR30(reg) l.lwz reg,0x78(r0) | ||
| 99 | |||
| 100 | #define EXCEPTION_T_STORE_GPR10 l.sw 0x7c(r0),r10 | ||
| 101 | #define EXCEPTION_T_LOAD_GPR10(reg) l.lwz reg,0x7c(r0) | ||
| 102 | |||
| 103 | #define EXCEPTION_T_STORE_SP l.sw 0x80(r0),r1 | ||
| 104 | #define EXCEPTION_T_LOAD_SP(reg) l.lwz reg,0x80(r0) | ||
| 105 | |||
| 106 | /* | ||
| 107 | * For UNHANDLED_EXCEPTION | ||
| 108 | */ | ||
| 109 | |||
| 110 | #define EXCEPTION_T_STORE_GPR31 l.sw 0x84(r0),r31 | ||
| 111 | #define EXCEPTION_T_LOAD_GPR31(reg) l.lwz reg,0x84(r0) | ||
| 112 | |||
| 113 | /* =========================================================[ macros ]=== */ | ||
| 114 | |||
| 115 | |||
| | /* GET_CURRENT_PGD(reg,t1): reg = current_pgd, read through its physical */ | ||
| | /* alias (t1 is scratch) so the load works while data translation is off. */ | ||
| 116 | #define GET_CURRENT_PGD(reg,t1) \ | ||
| 117 | LOAD_SYMBOL_2_GPR(reg,current_pgd) ;\ | ||
| 118 | tophys (t1,reg) ;\ | ||
| 119 | l.lwz reg,0(t1) | ||
| 120 | |||
| 121 | |||
| 122 | /* | ||
| 123 | * DSCR: this is a common hook for handling exceptions. it will save | ||
| 124 | * the needed registers, set up stack and pointer to current | ||
| 125 | * then jump to the handler while enabling MMU | ||
| 126 | * | ||
| 127 | * PRMS: handler - a function to jump to. it has to save the | ||
| 128 | * remaining registers to the kernel stack, call the | ||
| 129 | * appropriate arch-independent exception handler | ||
| 130 | * and finally jump to ret_from_except | ||
| 131 | * | ||
| 132 | * PREQ: unchanged state from the time exception happened | ||
| 133 | * | ||
| 134 | * POST: SAVED the following registers original value | ||
| 135 | * to the new created exception frame pointed to by r1 | ||
| 136 | * | ||
| 137 | * r1 - ksp pointing to the new (exception) frame | ||
| 138 | * r4 - EEAR exception EA | ||
| 139 | * r10 - current pointing to current_thread_info struct | ||
| 140 | * r12 - syscall 0, since we didn't come from syscall | ||
| 141 | * r13 - temp it actually contains new SR, not needed anymore | ||
| 142 | * r31 - handler address of the handler we'll jump to | ||
| 143 | * | ||
| 144 | * handler has to save remaining registers to the exception | ||
| 145 | * ksp frame *before* tainting them! | ||
| 146 | * | ||
| 147 | * NOTE: this function is not reentrant per se. Reentrancy is guaranteed | ||
| 148 | * by the processor disabling all exceptions/interrupts when an | ||
| 149 | * exception occurs. | ||
| 150 | * | ||
| 151 | * OPTM: no need to make it so wasteful to extract ksp when in user mode | ||
| 152 | */ | ||
| 153 | |||
| 154 | #define EXCEPTION_HANDLE(handler) \ | ||
| 155 | EXCEPTION_T_STORE_GPR30 ;\ | ||
| 156 | l.mfspr r30,r0,SPR_ESR_BASE ;\
| 157 | l.andi r30,r30,SPR_SR_SM ;\ | ||
| 158 | l.sfeqi r30,0 ;\ | ||
| | /* flag set <=> saved ESR[SM] == 0, i.e. exception came from user mode */ ;\ | ||
| 159 | EXCEPTION_T_STORE_GPR10 ;\ | ||
| 160 | l.bnf 2f /* kernel_mode */ ;\ | ||
| 161 | EXCEPTION_T_STORE_SP /* delay slot */ ;\ | ||
| 162 | 1: /* user_mode: */ ;\ | ||
| 163 | LOAD_SYMBOL_2_GPR(r1,current_thread_info_set) ;\ | ||
| 164 | tophys (r30,r1) ;\ | ||
| 165 | /* r10: current_thread_info */ ;\ | ||
| 166 | l.lwz r10,0(r30) ;\ | ||
| 167 | tophys (r30,r10) ;\ | ||
| 168 | l.lwz r1,(TI_KSP)(r30) ;\ | ||
| 169 | /* fall through */ ;\ | ||
| 170 | 2: /* kernel_mode: */ ;\ | ||
| 171 | /* create new stack frame, save only needed gprs */ ;\ | ||
| 172 | /* r1: KSP, r10: current, r4: EEAR, r31: __pa(KSP) */ ;\ | ||
| 173 | /* r12: temp, syscall indicator */ ;\ | ||
| 174 | l.addi r1,r1,-(INT_FRAME_SIZE) ;\ | ||
| 175 | /* r1 is KSP, r30 is __pa(KSP) */ ;\ | ||
| 176 | tophys (r30,r1) ;\ | ||
| 177 | l.sw PT_GPR12(r30),r12 ;\ | ||
| 178 | l.mfspr r12,r0,SPR_EPCR_BASE ;\ | ||
| 179 | l.sw PT_PC(r30),r12 ;\ | ||
| 180 | l.mfspr r12,r0,SPR_ESR_BASE ;\ | ||
| 181 | l.sw PT_SR(r30),r12 ;\ | ||
| 182 | /* save r30 */ ;\ | ||
| 183 | EXCEPTION_T_LOAD_GPR30(r12) ;\ | ||
| 184 | l.sw PT_GPR30(r30),r12 ;\ | ||
| 185 | /* save r10 as was prior to exception */ ;\ | ||
| 186 | EXCEPTION_T_LOAD_GPR10(r12) ;\ | ||
| 187 | l.sw PT_GPR10(r30),r12 ;\ | ||
| 188 | /* save PT_SP as was prior to exception */ ;\ | ||
| 189 | EXCEPTION_T_LOAD_SP(r12) ;\ | ||
| 190 | l.sw PT_SP(r30),r12 ;\ | ||
| 191 | /* save exception r4, set r4 = EA */ ;\ | ||
| 192 | l.sw PT_GPR4(r30),r4 ;\ | ||
| 193 | l.mfspr r4,r0,SPR_EEAR_BASE ;\ | ||
| 194 | /* r12 = 0: not a syscall entry (convention: r12 == 1 means syscall) */ ;\ | ||
| 195 | CLEAR_GPR(r12) ;\ | ||
| 196 | /* ----- turn on MMU ----- */ ;\ | ||
| 197 | l.ori r30,r0,(EXCEPTION_SR) ;\ | ||
| 198 | l.mtspr r0,r30,SPR_ESR_BASE ;\ | ||
| 199 | /* r30: EA address of handler */ ;\ | ||
| 200 | LOAD_SYMBOL_2_GPR(r30,handler) ;\ | ||
| 201 | l.mtspr r0,r30,SPR_EPCR_BASE ;\ | ||
| 202 | l.rfe | ||
| 203 | |||
| 204 | /* | ||
| 205 | * this doesn't work | ||
| 206 | * | ||
| 207 | * | ||
| 208 | * #ifdef CONFIG_JUMP_UPON_UNHANDLED_EXCEPTION | ||
| 209 | * #define UNHANDLED_EXCEPTION(handler) \ | ||
| 210 | * l.ori r3,r0,0x1 ;\ | ||
| 211 | * l.mtspr r0,r3,SPR_SR ;\ | ||
| 212 | * l.movhi r3,hi(0xf0000100) ;\ | ||
| 213 | * l.ori r3,r3,lo(0xf0000100) ;\ | ||
| 214 | * l.jr r3 ;\ | ||
| 215 | * l.nop 1 | ||
| 216 | * | ||
| 217 | * #endif | ||
| 218 | */ | ||
| 219 | |||
| 220 | /* DSCR: this is the same as EXCEPTION_HANDLE(), we are just | ||
| 221 | * a bit more careful (in case we have PT_SP or current-pointer | ||
| 222 | * corruption) and set them up from 'current_set' | ||
| 223 | * | ||
| 224 | */ | ||
| | /* Fatal-exception path: report the vector number and EPCR through the */ | ||
| | /* emergency console (presumably the UART at UART_BASE_ADD -- confirm), */ | ||
| | /* then build a frame on a dedicated stack and rfe into the handler. */ | ||
| 225 | #define UNHANDLED_EXCEPTION(handler) \ | ||
| 226 | EXCEPTION_T_STORE_GPR31 ;\ | ||
| 227 | EXCEPTION_T_STORE_GPR10 ;\ | ||
| 228 | EXCEPTION_T_STORE_SP ;\ | ||
| 229 | /* temporary store r3, r9 into r1, r10 */ ;\ | ||
| 230 | l.addi r1,r3,0x0 ;\ | ||
| 231 | l.addi r10,r9,0x0 ;\ | ||
| 232 | /* the string referenced by r3 must be low enough */ ;\ | ||
| 233 | l.jal _emergency_print ;\ | ||
| 234 | l.ori r3,r0,lo(_string_unhandled_exception) ;\ | ||
| 235 | l.mfspr r3,r0,SPR_NPC ;\ | ||
| 236 | l.jal _emergency_print_nr ;\ | ||
| 237 | l.andi r3,r3,0x1f00 ;\ | ||
| 238 | /* the string referenced by r3 must be low enough */ ;\ | ||
| 239 | l.jal _emergency_print ;\ | ||
| 240 | l.ori r3,r0,lo(_string_epc_prefix) ;\ | ||
| 241 | l.jal _emergency_print_nr ;\ | ||
| 242 | l.mfspr r3,r0,SPR_EPCR_BASE ;\ | ||
| 243 | l.jal _emergency_print ;\ | ||
| 244 | l.ori r3,r0,lo(_string_nl) ;\ | ||
| 245 | /* end of printing */ ;\ | ||
| 246 | l.addi r3,r1,0x0 ;\ | ||
| 247 | l.addi r9,r10,0x0 ;\ | ||
| 248 | /* extract current, ksp from current_set */ ;\ | ||
| 249 | LOAD_SYMBOL_2_GPR(r1,_unhandled_stack_top) ;\ | ||
| 250 | LOAD_SYMBOL_2_GPR(r10,init_thread_union) ;\ | ||
| 251 | /* create new stack frame, save only needed gprs */ ;\ | ||
| 252 | /* r1: KSP, r10: current, r31: __pa(KSP) */ ;\ | ||
| 253 | /* r12: temp, syscall indicator, r13 temp */ ;\ | ||
| 254 | l.addi r1,r1,-(INT_FRAME_SIZE) ;\ | ||
| 255 | /* r1 is KSP, r31 is __pa(KSP) */ ;\ | ||
| 256 | tophys (r31,r1) ;\ | ||
| 257 | l.sw PT_GPR12(r31),r12 ;\ | ||
| 258 | l.mfspr r12,r0,SPR_EPCR_BASE ;\ | ||
| 259 | l.sw PT_PC(r31),r12 ;\ | ||
| 260 | l.mfspr r12,r0,SPR_ESR_BASE ;\ | ||
| 261 | l.sw PT_SR(r31),r12 ;\ | ||
| 262 | /* save r31 */ ;\ | ||
| 263 | EXCEPTION_T_LOAD_GPR31(r12) ;\ | ||
| 264 | l.sw PT_GPR31(r31),r12 ;\ | ||
| 265 | /* save r10 as was prior to exception */ ;\ | ||
| 266 | EXCEPTION_T_LOAD_GPR10(r12) ;\ | ||
| 267 | l.sw PT_GPR10(r31),r12 ;\ | ||
| 268 | /* save PT_SP as was prior to exception */ ;\ | ||
| 269 | EXCEPTION_T_LOAD_SP(r12) ;\ | ||
| 270 | l.sw PT_SP(r31),r12 ;\ | ||
| 271 | l.sw PT_GPR13(r31),r13 ;\ | ||
| 272 | /* --> */ ;\ | ||
| 273 | /* save exception r4, set r4 = EA */ ;\ | ||
| 274 | l.sw PT_GPR4(r31),r4 ;\ | ||
| 275 | l.mfspr r4,r0,SPR_EEAR_BASE ;\ | ||
| 276 | /* r12 = 0: not a syscall entry (convention: r12 == 1 means syscall) */ ;\ | ||
| 277 | CLEAR_GPR(r12) ;\ | ||
| 278 | /* ----- play a MMU trick ----- */ ;\ | ||
| 279 | l.ori r31,r0,(EXCEPTION_SR) ;\ | ||
| 280 | l.mtspr r0,r31,SPR_ESR_BASE ;\ | ||
| 281 | /* r31: EA address of handler */ ;\ | ||
| 282 | LOAD_SYMBOL_2_GPR(r31,handler) ;\ | ||
| 283 | l.mtspr r0,r31,SPR_EPCR_BASE ;\ | ||
| 284 | l.rfe | ||
| 285 | |||
| 286 | /* =====================================================[ exceptions] === */ | ||
| 287 | |||
| 288 | /* ---[ 0x100: RESET exception ]----------------------------------------- */ | ||
| 289 | .org 0x100 | ||
| 290 | /* Jump to .init code at _start which lives in the .head section | ||
| 291 | * and will be discarded after boot. | ||
| 292 | */ | ||
| | /* reset runs with the MMU off, so convert _start's link-time (virtual) | ||
| | * address to its physical alias before jumping */ | ||
| 293 | LOAD_SYMBOL_2_GPR(r4, _start) | ||
| 294 | tophys (r3,r4) /* MMU disabled */ | ||
| 295 | l.jr r3 | ||
| 296 | l.nop | ||
| 297 | |||
| 298 | /* ---[ 0x200: BUS exception ]------------------------------------------- */ | ||
| 299 | .org 0x200 | ||
| 300 | _dispatch_bus_fault: | ||
| 301 | EXCEPTION_HANDLE(_bus_fault_handler) | ||
| 302 | |||
| 303 | /* ---[ 0x300: Data Page Fault exception ]------------------------------- */ | ||
| 304 | .org 0x300 | ||
| 305 | _dispatch_do_dpage_fault: | ||
| 306 | // totally disable timer interrupt | ||
| 307 | // l.mtspr r0,r0,SPR_TTMR | ||
| 308 | // DEBUG_TLB_PROBE(0x300) | ||
| 309 | // EXCEPTION_DEBUG_VALUE_ER_ENABLED(0x300) | ||
| 310 | EXCEPTION_HANDLE(_data_page_fault_handler) | ||
| 311 | |||
| 312 | /* ---[ 0x400: Insn Page Fault exception ]------------------------------- */ | ||
| 313 | .org 0x400 | ||
| 314 | _dispatch_do_ipage_fault: | ||
| 315 | // totally disable timer interrupt | ||
| 316 | // l.mtspr r0,r0,SPR_TTMR | ||
| 317 | // DEBUG_TLB_PROBE(0x400) | ||
| 318 | // EXCEPTION_DEBUG_VALUE_ER_ENABLED(0x400) | ||
| 319 | EXCEPTION_HANDLE(_insn_page_fault_handler) | ||
| 320 | |||
| 321 | /* ---[ 0x500: Timer exception ]----------------------------------------- */ | ||
| 322 | .org 0x500 | ||
| 323 | EXCEPTION_HANDLE(_timer_handler) | ||
| 324 | |||
| 325 | /* ---[ 0x600: Alignment exception ]------------------------------------- */ | ||
| 326 | .org 0x600 | ||
| 327 | EXCEPTION_HANDLE(_alignment_handler) | ||
| 328 | |||
| 329 | /* ---[ 0x700: Illegal insn exception ]---------------------------------- */ | ||
| 330 | .org 0x700 | ||
| 331 | EXCEPTION_HANDLE(_illegal_instruction_handler) | ||
| 332 | |||
| 333 | /* ---[ 0x800: External interrupt exception ]---------------------------- */ | ||
| 334 | .org 0x800 | ||
| 335 | EXCEPTION_HANDLE(_external_irq_handler) | ||
| 336 | |||
| 337 | /* ---[ 0x900: DTLB miss exception ]------------------------------------- */ | ||
| 338 | .org 0x900 | ||
| | /* TLB misses branch directly (no frame): the handlers run with MMU off */ | ||
| 339 | l.j boot_dtlb_miss_handler | ||
| 340 | l.nop | ||
| 341 | |||
| 342 | /* ---[ 0xa00: ITLB miss exception ]------------------------------------- */ | ||
| 343 | .org 0xa00 | ||
| 344 | l.j boot_itlb_miss_handler | ||
| 345 | l.nop | ||
| 346 | |||
| 347 | /* ---[ 0xb00: Range exception ]----------------------------------------- */ | ||
| 348 | .org 0xb00 | ||
| 349 | UNHANDLED_EXCEPTION(_vector_0xb00) | ||
| 350 | |||
| 351 | /* ---[ 0xc00: Syscall exception ]--------------------------------------- */ | ||
| 352 | .org 0xc00 | ||
| 353 | EXCEPTION_HANDLE(_sys_call_handler) | ||
| 354 | |||
| 355 | /* ---[ 0xd00: Trap exception ]------------------------------------------ */ | ||
| | /* NOTE(review): the OR1K architecture manual lists 0xd00 as the floating- | ||
| | * point exception; the "Trap" label here looks copied from 0xe00 -- confirm */ | ||
| 356 | .org 0xd00 | ||
| 357 | UNHANDLED_EXCEPTION(_vector_0xd00) | ||
| 358 | |||
| 359 | /* ---[ 0xe00: Trap exception ]------------------------------------------ */ | ||
| 360 | .org 0xe00 | ||
| 361 | // UNHANDLED_EXCEPTION(_vector_0xe00) | ||
| 362 | EXCEPTION_HANDLE(_trap_handler) | ||
| 363 | |||
| 364 | /* ---[ 0xf00: Reserved exception ]-------------------------------------- */ | ||
| 365 | .org 0xf00 | ||
| 366 | UNHANDLED_EXCEPTION(_vector_0xf00) | ||
| 367 | |||
| 368 | /* ---[ 0x1000: Reserved exception ]------------------------------------- */ | ||
| 369 | .org 0x1000 | ||
| 370 | UNHANDLED_EXCEPTION(_vector_0x1000) | ||
| 371 | |||
| 372 | /* ---[ 0x1100: Reserved exception ]------------------------------------- */ | ||
| 373 | .org 0x1100 | ||
| 374 | UNHANDLED_EXCEPTION(_vector_0x1100) | ||
| 375 | |||
| 376 | /* ---[ 0x1200: Reserved exception ]------------------------------------- */ | ||
| 377 | .org 0x1200 | ||
| 378 | UNHANDLED_EXCEPTION(_vector_0x1200) | ||
| 379 | |||
| 380 | /* ---[ 0x1300: Reserved exception ]------------------------------------- */ | ||
| 381 | .org 0x1300 | ||
| 382 | UNHANDLED_EXCEPTION(_vector_0x1300) | ||
| 383 | |||
| 384 | /* ---[ 0x1400: Reserved exception ]------------------------------------- */ | ||
| 385 | .org 0x1400 | ||
| 386 | UNHANDLED_EXCEPTION(_vector_0x1400) | ||
| 387 | |||
| 388 | /* ---[ 0x1500: Reserved exception ]------------------------------------- */ | ||
| 389 | .org 0x1500 | ||
| 390 | UNHANDLED_EXCEPTION(_vector_0x1500) | ||
| 391 | |||
| 392 | /* ---[ 0x1600: Reserved exception ]------------------------------------- */ | ||
| 393 | .org 0x1600 | ||
| 394 | UNHANDLED_EXCEPTION(_vector_0x1600) | ||
| 395 | |||
| 396 | /* ---[ 0x1700: Reserved exception ]------------------------------------- */ | ||
| 397 | .org 0x1700 | ||
| 398 | UNHANDLED_EXCEPTION(_vector_0x1700) | ||
| 399 | |||
| 400 | /* ---[ 0x1800: Reserved exception ]------------------------------------- */ | ||
| 401 | .org 0x1800 | ||
| 402 | UNHANDLED_EXCEPTION(_vector_0x1800) | ||
| 403 | |||
| 404 | /* ---[ 0x1900: Reserved exception ]------------------------------------- */ | ||
| 405 | .org 0x1900 | ||
| 406 | UNHANDLED_EXCEPTION(_vector_0x1900) | ||
| 407 | |||
| 408 | /* ---[ 0x1a00: Reserved exception ]------------------------------------- */ | ||
| 409 | .org 0x1a00 | ||
| 410 | UNHANDLED_EXCEPTION(_vector_0x1a00) | ||
| 411 | |||
| 412 | /* ---[ 0x1b00: Reserved exception ]------------------------------------- */ | ||
| 413 | .org 0x1b00 | ||
| 414 | UNHANDLED_EXCEPTION(_vector_0x1b00) | ||
| 415 | |||
| 416 | /* ---[ 0x1c00: Reserved exception ]------------------------------------- */ | ||
| 417 | .org 0x1c00 | ||
| 418 | UNHANDLED_EXCEPTION(_vector_0x1c00) | ||
| 419 | |||
| 420 | /* ---[ 0x1d00: Reserved exception ]------------------------------------- */ | ||
| 421 | .org 0x1d00 | ||
| 422 | UNHANDLED_EXCEPTION(_vector_0x1d00) | ||
| 423 | |||
| 424 | /* ---[ 0x1e00: Reserved exception ]------------------------------------- */ | ||
| 425 | .org 0x1e00 | ||
| 426 | UNHANDLED_EXCEPTION(_vector_0x1e00) | ||
| 427 | |||
| 428 | /* ---[ 0x1f00: Reserved exception ]------------------------------------- */ | ||
| 429 | .org 0x1f00 | ||
| 430 | UNHANDLED_EXCEPTION(_vector_0x1f00) | ||
| 431 | |||
| 432 | .org 0x2000 | ||
| 433 | /* ===================================================[ kernel start ]=== */ | ||
| 434 | |||
| 435 | /* .text*/ | ||
| 436 | |||
| 437 | /* This early stuff belongs in HEAD, but some of the functions below definitely | ||
| 438 | * don't... */ | ||
| 439 | |||
__HEAD
	.global _start
_start:
	/*
	 * Put the CPU into a known state for a deterministic start:
	 * SR = 0x1 (SM bit only), i.e. supervisor mode with MMUs,
	 * caches and interrupts all disabled.
	 */

	l.ori	r3,r0,0x1
	l.mtspr	r0,r3,SPR_SR

	/* Zero the entire register file so boot state is deterministic */
	CLEAR_GPR(r1)
	CLEAR_GPR(r2)
	CLEAR_GPR(r3)
	CLEAR_GPR(r4)
	CLEAR_GPR(r5)
	CLEAR_GPR(r6)
	CLEAR_GPR(r7)
	CLEAR_GPR(r8)
	CLEAR_GPR(r9)
	CLEAR_GPR(r10)
	CLEAR_GPR(r11)
	CLEAR_GPR(r12)
	CLEAR_GPR(r13)
	CLEAR_GPR(r14)
	CLEAR_GPR(r15)
	CLEAR_GPR(r16)
	CLEAR_GPR(r17)
	CLEAR_GPR(r18)
	CLEAR_GPR(r19)
	CLEAR_GPR(r20)
	CLEAR_GPR(r21)
	CLEAR_GPR(r22)
	CLEAR_GPR(r23)
	CLEAR_GPR(r24)
	CLEAR_GPR(r25)
	CLEAR_GPR(r26)
	CLEAR_GPR(r27)
	CLEAR_GPR(r28)
	CLEAR_GPR(r29)
	CLEAR_GPR(r30)
	CLEAR_GPR(r31)

	/*
	 * Set up the initial kernel stack (r1) and 'current' (r10).
	 * The stack grows down from the top of init_thread_union
	 * (+0x2000); thread_info->ksp is recorded via the physical
	 * address since the MMU is still off here.
	 */
	LOAD_SYMBOL_2_GPR(r1,init_thread_union+0x2000)	// setup kernel stack
	LOAD_SYMBOL_2_GPR(r10,init_thread_union)	// setup current
	tophys	(r31,r10)		// MMU off: access thread_info physically
	l.sw	TI_KSP(r31), r1		// thread_info->ksp = initial stack ptr

	l.ori	r4,r0,0x0

	/*
	 * .data contains initialized data,
	 * .bss contains uninitialized data - clear it up
	 */
clear_bss:
	LOAD_SYMBOL_2_GPR(r24, __bss_start)
	LOAD_SYMBOL_2_GPR(r26, _end)
	tophys(r28,r24)			// r28 = physical start of .bss
	tophys(r30,r26)			// r30 = physical end of image
	CLEAR_GPR(r24)
	CLEAR_GPR(r26)
1:
	l.sw	(0)(r28),r0		// zero one word
	l.sfltu	r28,r30			// flag = (r28 < r30)
	l.bf	1b
	l.addi	r28,r28,4		// delay slot: advance pointer
	/* NOTE(review): the compare happens before the delay-slot
	 * increment, so the loop also zeroes the word at _end itself -
	 * presumably harmless, but confirm nothing lives there */

enable_ic:
	l.jal	_ic_enable
	l.nop				// delay slot

enable_dc:
	l.jal	_dc_enable
	l.nop				// delay slot

flush_tlb:
	/*
	 * Invalidate all DTLB and ITLB match registers so no stale
	 * translations survive into the kernel proper.
	 */
	LOAD_SYMBOL_2_GPR(r5,SPR_DTLBMR_BASE(0))
	LOAD_SYMBOL_2_GPR(r6,SPR_ITLBMR_BASE(0))
	l.addi	r7,r0,128 /* Maximum number of sets */
1:
	l.mtspr	r5,r0,0x0		// clear DTLBMR for this set
	l.mtspr	r6,r0,0x0		// clear ITLBMR for this set

	l.addi	r5,r5,1
	l.addi	r6,r6,1
	l.sfeq	r7,r0
	l.bnf	1b
	l.addi	r7,r7,-1		// delay slot: decrement set counter

	/* The MMU needs to be enabled before or32_early_setup is called */

enable_mmu:
	/*
	 * Enable both MMUs by OR-ing the DME (data) and IME
	 * (instruction) bits into the supervision register.
	 */
	l.mfspr	r30,r0,SPR_SR
	l.movhi	r28,hi(SPR_SR_DME | SPR_SR_IME)
	l.ori	r28,r28,lo(SPR_SR_DME | SPR_SR_IME)
	l.or	r30,r30,r28
	l.mtspr	r0,r30,SPR_SR
	/* nops to drain the pipeline after switching the MMUs on */
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop

	// reset the simulation counters
	l.nop	5

	/* runs with the MMU enabled (see comment above) */
	LOAD_SYMBOL_2_GPR(r24, or32_early_setup)
	l.jalr	r24
	l.nop				// delay slot

clear_regs:
	/*
	 * Clear all GPRs again (except r1 = kernel stack and
	 * r10 = current) to increase determinism before entering C.
	 */
	CLEAR_GPR(r2)
	CLEAR_GPR(r3)
	CLEAR_GPR(r4)
	CLEAR_GPR(r5)
	CLEAR_GPR(r6)
	CLEAR_GPR(r7)
	CLEAR_GPR(r8)
	CLEAR_GPR(r9)
	CLEAR_GPR(r11)
	CLEAR_GPR(r12)
	CLEAR_GPR(r13)
	CLEAR_GPR(r14)
	CLEAR_GPR(r15)
	CLEAR_GPR(r16)
	CLEAR_GPR(r17)
	CLEAR_GPR(r18)
	CLEAR_GPR(r19)
	CLEAR_GPR(r20)
	CLEAR_GPR(r21)
	CLEAR_GPR(r22)
	CLEAR_GPR(r23)
	CLEAR_GPR(r24)
	CLEAR_GPR(r25)
	CLEAR_GPR(r26)
	CLEAR_GPR(r27)
	CLEAR_GPR(r28)
	CLEAR_GPR(r29)
	CLEAR_GPR(r30)
	CLEAR_GPR(r31)

jump_start_kernel:
	/*
	 * jump to kernel entry (start_kernel)
	 */
	LOAD_SYMBOL_2_GPR(r30, start_kernel)
	l.jr	r30
	l.nop				// delay slot
| 613 | |||
| 614 | /* ========================================[ cache ]=== */ | ||
| 615 | |||
/* alignment here so we don't change memory offsets with
 * memory controller defined
 */
| 619 | .align 0x2000 | ||
| 620 | |||
/*
 * Invalidate and enable the instruction cache.  No-op if the CPU
 * reports no IC in the unit-present register (UPR).
 * Clobbers r5, r6, r14, r16, r24, r26, r28, r30; returns via r9.
 */
_ic_enable:
	/* Check if IC present and skip enabling otherwise */
	l.mfspr	r24,r0,SPR_UPR
	l.andi	r26,r24,SPR_UPR_ICP
	l.sfeq	r26,r0
	l.bf	9f			// no IC: just return
	l.nop

	/* Disable IC: clear SR[ICE] (r5 = ~SPR_SR_ICE) */
	l.mfspr	r6,r0,SPR_SR
	l.addi	r5,r0,-1
	l.xori	r5,r5,SPR_SR_ICE
	l.and	r5,r6,r5
	l.mtspr	r0,r5,SPR_SR

	/* Establish cache block size from ICCFGR[CBS]
	   If BS=0, 16;
	   If BS=1, 32;
	   r14 contains block size
	*/
	l.mfspr	r24,r0,SPR_ICCFGR
	l.andi	r26,r24,SPR_ICCFGR_CBS
	l.srli	r28,r26,7
	l.ori	r30,r0,16
	l.sll	r14,r30,r28

	/* Establish number of cache sets from ICCFGR[NCS]
	   r16 contains number of cache sets
	   r28 contains log(# of cache sets)
	*/
	l.andi	r26,r24,SPR_ICCFGR_NCS
	l.srli	r28,r26,3
	l.ori	r30,r0,1
	l.sll	r16,r30,r28

	/* Invalidate IC: write each block address up to
	 * r5 = block_size << log2(nsets) into the block-invalidate reg */
	l.addi	r6,r0,0
	l.sll	r5,r14,r28
//	l.mul	r5,r14,r16
//	l.trap	1
//	l.addi	r5,r0,IC_SIZE
1:
	l.mtspr	r0,r6,SPR_ICBIR
	l.sfne	r6,r5
	l.bf	1b
	l.add	r6,r6,r14		// delay slot: next block
//	l.addi	r6,r6,IC_LINE

	/* Enable IC: set SR[ICE]; nops drain the pipeline */
	l.mfspr	r6,r0,SPR_SR
	l.ori	r6,r6,SPR_SR_ICE
	l.mtspr	r0,r6,SPR_SR
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
9:
	l.jr	r9			// return to caller
	l.nop				// delay slot
| 686 | |||
/*
 * Invalidate and enable the data cache.  No-op if the CPU reports
 * no DC in the unit-present register (UPR).  Mirrors _ic_enable.
 * Clobbers r5, r6, r14, r16, r24, r26, r28, r30; returns via r9.
 */
_dc_enable:
	/* Check if DC present and skip enabling otherwise */
	l.mfspr	r24,r0,SPR_UPR
	l.andi	r26,r24,SPR_UPR_DCP
	l.sfeq	r26,r0
	l.bf	9f			// no DC: just return
	l.nop

	/* Disable DC: clear SR[DCE] (r5 = ~SPR_SR_DCE) */
	l.mfspr	r6,r0,SPR_SR
	l.addi	r5,r0,-1
	l.xori	r5,r5,SPR_SR_DCE
	l.and	r5,r6,r5
	l.mtspr	r0,r5,SPR_SR

	/* Establish cache block size from DCCFGR[CBS]
	   If BS=0, 16;
	   If BS=1, 32;
	   r14 contains block size
	*/
	l.mfspr	r24,r0,SPR_DCCFGR
	l.andi	r26,r24,SPR_DCCFGR_CBS
	l.srli	r28,r26,7
	l.ori	r30,r0,16
	l.sll	r14,r30,r28

	/* Establish number of cache sets from DCCFGR[NCS]
	   r16 contains number of cache sets
	   r28 contains log(# of cache sets)
	*/
	l.andi	r26,r24,SPR_DCCFGR_NCS
	l.srli	r28,r26,3
	l.ori	r30,r0,1
	l.sll	r16,r30,r28

	/* Invalidate DC: write each block address up to
	 * r5 = block_size << log2(nsets) into the block-invalidate reg */
	l.addi	r6,r0,0
	l.sll	r5,r14,r28
1:
	l.mtspr	r0,r6,SPR_DCBIR
	l.sfne	r6,r5
	l.bf	1b
	l.add	r6,r6,r14		// delay slot: next block

	/* Enable DC: set SR[DCE] */
	l.mfspr	r6,r0,SPR_SR
	l.ori	r6,r6,SPR_SR_DCE
	l.mtspr	r0,r6,SPR_SR
9:
	l.jr	r9			// return to caller
	l.nop				// delay slot
| 738 | |||
| 739 | /* ===============================================[ page table masks ]=== */ | ||
| 740 | |||
| 741 | /* bit 4 is used in hardware as write back cache bit. we never use this bit | ||
| 742 | * explicitly, so we can reuse it as _PAGE_FILE bit and mask it out when | ||
| 743 | * writing into hardware pte's | ||
| 744 | */ | ||
| 745 | |||
| 746 | #define DTLB_UP_CONVERT_MASK 0x3fa | ||
| 747 | #define ITLB_UP_CONVERT_MASK 0x3a | ||
| 748 | |||
| 749 | /* for SMP we'd have (this is a bit subtle, CC must be always set | ||
| 750 | * for SMP, but since we have _PAGE_PRESENT bit always defined | ||
| 751 | * we can just modify the mask) | ||
| 752 | */ | ||
| 753 | #define DTLB_SMP_CONVERT_MASK 0x3fb | ||
| 754 | #define ITLB_SMP_CONVERT_MASK 0x3b | ||
| 755 | |||
| 756 | /* ---[ boot dtlb miss handler ]----------------------------------------- */ | ||
| 757 | |||
/*
 * Boot-time DTLB miss handler: installs a 1:1 (or tophys-translated)
 * mapping for the faulting EA directly into the DTLB and returns
 * with l.rfe.  Used only until the real dtlb_miss_handler takes over.
 */
boot_dtlb_miss_handler:

/* mask for DTLB_MR register: - (0) sets V (valid) bit,
 *                            - (31-12) sets bits belonging to VPN (31-12)
 */
#define DTLB_MR_MASK	0xfffff001

/* mask for DTLB_TR register: - (2) sets CI (cache inhibit) bit,
 *                            - (4) sets A (access) bit,
 *                            - (5) sets D (dirty) bit,
 *                            - (8) sets SRE (superuser read) bit
 *                            - (9) sets SWE (superuser write) bit
 *                            - (31-12) sets bits belonging to VPN (31-12)
 */
#define DTLB_TR_MASK	0xfffff332

/* These are for masking out the VPN/PPN value from the MR/TR registers...
 * it's not the same as the PFN */
#define VPN_MASK	0xfffff000
#define PPN_MASK	0xfffff000


	EXCEPTION_STORE_GPR6

#if 0
	l.mfspr	r6,r0,SPR_ESR_BASE	   //
	l.andi	r6,r6,SPR_SR_SM		   // are we in kernel mode ?
	l.sfeqi	r6,0			   // r6 == 0x1 --> SM
	l.bf	exit_with_no_dtranslation  //
	l.nop
#endif

	/* this could be optimized by moving storing of
	 * non r6 registers here, and jumping r6 restore
	 * if not in supervisor mode
	 */

	EXCEPTION_STORE_GPR2
	EXCEPTION_STORE_GPR3
	EXCEPTION_STORE_GPR4
	EXCEPTION_STORE_GPR5

	l.mfspr	r4,r0,SPR_EEAR_BASE	// get the offending EA

immediate_translation:
	CLEAR_GPR(r6)

	l.srli	r3,r4,0xd		// r3 <- r4 / 8192 (sets are relative to page size (8Kb) NOT VPN size (4Kb)

	/* read the number of DTLB sets from DMMUCFGR and build a mask */
	l.mfspr	r6, r0, SPR_DMMUCFGR
	l.andi	r6, r6, SPR_DMMUCFGR_NTS
	l.srli	r6, r6, SPR_DMMUCFGR_NTS_OFF
	l.ori	r5, r0, 0x1
	l.sll	r5, r5, r6		// r5 = number DMMU sets
	l.addi	r6, r5, -1		// r6 = nsets mask
	l.and	r2, r3, r6		// r2 <- r3 % NSETS_MASK (TLB set index)

	l.or	r6,r6,r4		// r6 <- r4
	l.ori	r6,r6,~(VPN_MASK)	// r6 <- VPN :VPN .xfff - clear up lo(r6) to 0x**** *fff
	l.movhi	r5,hi(DTLB_MR_MASK)	// r5 <- ffff:0000.x000
	l.ori	r5,r5,lo(DTLB_MR_MASK)	// r5 <- ffff:1111.x001 - apply DTLB_MR_MASK
	l.and	r5,r5,r6		// r5 <- VPN :VPN .x001 - we have DTLBMR entry
	l.mtspr	r2,r5,SPR_DTLBMR_BASE(0) // set DTLBMR

	/* set up DTLB with no translation for EA <= 0xbfffffff */
	LOAD_SYMBOL_2_GPR(r6,0xbfffffff)
	l.sfgeu	r6,r4			// flag if r6 >= r4 (if 0xbfffffff >= EA)
	l.bf	1f			// goto out
	l.and	r3,r4,r4		// delay slot :: r3 <- r4 (if flag==1)

	tophys(r3,r4)			// r3 <- PA
1:
	l.ori	r3,r3,~(PPN_MASK)	// r3 <- PPN :PPN .xfff - clear up lo(r3) to 0x**** *fff
	l.movhi	r5,hi(DTLB_TR_MASK)	// r5 <- ffff:0000.x000
	l.ori	r5,r5,lo(DTLB_TR_MASK)	// r5 <- ffff:1111.x330 - apply DTLB_TR_MASK
	l.and	r5,r5,r3		// r5 <- PPN :PPN .x330 - we have DTLBTR entry
	l.mtspr	r2,r5,SPR_DTLBTR_BASE(0) // set DTLBTR

	EXCEPTION_LOAD_GPR6
	EXCEPTION_LOAD_GPR5
	EXCEPTION_LOAD_GPR4
	EXCEPTION_LOAD_GPR3
	EXCEPTION_LOAD_GPR2

	l.rfe				// SR <- ESR, PC <- EPC

exit_with_no_dtranslation:
	/* EA out of memory or not in supervisor mode */
	EXCEPTION_LOAD_GPR6
	EXCEPTION_LOAD_GPR4
	l.j	_dispatch_bus_fault
	l.nop				/* fill the branch delay slot; without
					 * this the instruction following this
					 * handler would execute in the delay
					 * slot of l.j (the itlb variant below
					 * already has the l.nop) */
| 850 | /* ---[ boot itlb miss handler ]----------------------------------------- */ | ||
| 851 | |||
/*
 * Boot-time ITLB miss handler: installs a 1:1 (or tophys-translated)
 * instruction mapping for the faulting EA directly into the ITLB and
 * returns with l.rfe.  Mirrors boot_dtlb_miss_handler above.
 */
boot_itlb_miss_handler:

/* mask for ITLB_MR register: - sets V (valid) bit,
 *                            - sets bits belonging to VPN (15-12)
 */
#define ITLB_MR_MASK	0xfffff001

/* mask for ITLB_TR register: - sets A (access) bit,
 *                            - sets SXE (superuser execute) bit
 *                            - sets bits belonging to VPN (15-12)
 */
#define ITLB_TR_MASK	0xfffff050

/*
#define VPN_MASK	0xffffe000
#define PPN_MASK	0xffffe000
*/



	EXCEPTION_STORE_GPR2
	EXCEPTION_STORE_GPR3
	EXCEPTION_STORE_GPR4
	EXCEPTION_STORE_GPR5
	EXCEPTION_STORE_GPR6

#if 0
	l.mfspr	r6,r0,SPR_ESR_BASE	//
	l.andi	r6,r6,SPR_SR_SM		// are we in kernel mode ?
	l.sfeqi	r6,0			// r6 == 0x1 --> SM
	l.bf	exit_with_no_itranslation
	l.nop
#endif


	l.mfspr	r4,r0,SPR_EEAR_BASE	// get the offending EA

earlyearly:
	CLEAR_GPR(r6)

	l.srli	r3,r4,0xd		// r3 <- r4 / 8192 (sets are relative to page size (8Kb) NOT VPN size (4Kb)

	/* read the number of ITLB sets from IMMUCFGR and build a mask */
	l.mfspr	r6, r0, SPR_IMMUCFGR
	l.andi	r6, r6, SPR_IMMUCFGR_NTS
	l.srli	r6, r6, SPR_IMMUCFGR_NTS_OFF
	l.ori	r5, r0, 0x1
	l.sll	r5, r5, r6		// r5 = number IMMU sets from IMMUCFGR
	l.addi	r6, r5, -1		// r6 = nsets mask
	l.and	r2, r3, r6		// r2 <- r3 % NSETS_MASK (TLB set index)

	l.or	r6,r6,r4		// r6 <- r4
	l.ori	r6,r6,~(VPN_MASK)	// r6 <- VPN :VPN .xfff - clear up lo(r6) to 0x**** *fff
	l.movhi	r5,hi(ITLB_MR_MASK)	// r5 <- ffff:0000.x000
	l.ori	r5,r5,lo(ITLB_MR_MASK)	// r5 <- ffff:1111.x001 - apply ITLB_MR_MASK
	l.and	r5,r5,r6		// r5 <- VPN :VPN .x001 - we have ITLBMR entry
	l.mtspr	r2,r5,SPR_ITLBMR_BASE(0) // set ITLBMR

	/*
	 * set up ITLB with no translation for EA <= 0x0fffffff
	 *
	 * we need this for head.S mapping (EA = PA). if we move all functions
	 * which run with mmu enabled into entry.S, we might be able to eliminate this.
	 *
	 */
	LOAD_SYMBOL_2_GPR(r6,0x0fffffff)
	l.sfgeu	r6,r4			// flag if r6 >= r4 (if 0x0fffffff >= EA)
	l.bf	1f			// goto out
	l.and	r3,r4,r4		// delay slot :: r3 <- r4 (if flag==1)

	tophys(r3,r4)			// r3 <- PA
1:
	l.ori	r3,r3,~(PPN_MASK)	// r3 <- PPN :PPN .xfff - clear up lo(r3) to 0x**** *fff
	l.movhi	r5,hi(ITLB_TR_MASK)	// r5 <- ffff:0000.x000
	l.ori	r5,r5,lo(ITLB_TR_MASK)	// r5 <- ffff:1111.x050 - apply ITLB_TR_MASK
	l.and	r5,r5,r3		// r5 <- PPN :PPN .x050 - we have ITLBTR entry
	l.mtspr	r2,r5,SPR_ITLBTR_BASE(0) // set ITLBTR

	EXCEPTION_LOAD_GPR6
	EXCEPTION_LOAD_GPR5
	EXCEPTION_LOAD_GPR4
	EXCEPTION_LOAD_GPR3
	EXCEPTION_LOAD_GPR2

	l.rfe				// SR <- ESR, PC <- EPC

exit_with_no_itranslation:
	/* EA out of memory or not in supervisor mode */
	EXCEPTION_LOAD_GPR4
	EXCEPTION_LOAD_GPR6
	l.j	_dispatch_bus_fault
	l.nop				// delay slot
| 942 | |||
| 943 | /* ====================================================================== */ | ||
| 944 | /* | ||
| 945 | * Stuff below here shouldn't go into .head section... maybe this stuff | ||
| 946 | * can be moved to entry.S ??? | ||
| 947 | */ | ||
| 948 | |||
| 949 | /* ==============================================[ DTLB miss handler ]=== */ | ||
| 950 | |||
| 951 | /* | ||
| 952 | * Comments: | ||
| 953 | * Exception handlers are entered with MMU off so the following handler | ||
| 954 | * needs to use physical addressing | ||
| 955 | * | ||
| 956 | */ | ||
| 957 | |||
	.text
/*
 * Runtime DTLB miss handler: walks the two-level page table for the
 * faulting data EA and, if a present pte is found, loads it into the
 * DTLB (TR then MR) and returns with l.rfe; otherwise falls through
 * to the do_page_fault dispatch.  Entered with the MMU off, so page
 * tables are accessed via tophys.
 */
ENTRY(dtlb_miss_handler)
	EXCEPTION_STORE_GPR2
	EXCEPTION_STORE_GPR3
	EXCEPTION_STORE_GPR4
	EXCEPTION_STORE_GPR5
	EXCEPTION_STORE_GPR6
	/*
	 * get EA of the miss
	 */
	l.mfspr	r2,r0,SPR_EEAR_BASE
	/*
	 * pmd = (pmd_t *)(current_pgd + pgd_index(daddr));
	 */
	GET_CURRENT_PGD(r3,r5)		// r3 is current_pgd, r5 is temp
	l.srli	r4,r2,0x18		// >> PAGE_SHIFT + (PAGE_SHIFT - 2)
	l.slli	r4,r4,0x2		// to get address << 2
	l.add	r5,r4,r3		// r4 is pgd_index(daddr)
	/*
	 * if (pmd_none(*pmd))
	 *	goto pmd_none:
	 */
	tophys	(r4,r5)			// physical address of the pgd entry
	l.lwz	r3,0x0(r4)		// get *pmd value
	l.sfne	r3,r0
	l.bnf	d_pmd_none
	l.andi	r3,r3,~PAGE_MASK	//0x1fff // ~PAGE_MASK (delay slot)
	/*
	 * if (pmd_bad(*pmd))
	 *	pmd_clear(pmd)
	 *	goto pmd_bad:
	 */
//	l.sfeq	r3,r0			// check *pmd value
//	l.bf	d_pmd_good
	l.addi	r3,r0,0xffffe000	// PAGE_MASK
//	l.j	d_pmd_bad
//	l.sw	0x0(r4),r0		// clear pmd
d_pmd_good:
	/*
	 * pte = *pte_offset(pmd, daddr);
	 */
	l.lwz	r4,0x0(r4)		// get **pmd value
	l.and	r4,r4,r3		// & PAGE_MASK -> pte table base
	l.srli	r5,r2,0xd		// >> PAGE_SHIFT, r2 == EEAR
	l.andi	r3,r5,0x7ff		// (1UL << PAGE_SHIFT - 2) - 1
	l.slli	r3,r3,0x2		// to get address << 2
	l.add	r3,r3,r4
	l.lwz	r2,0x0(r3)		// this is pte at last
	/*
	 * if (!pte_present(pte))
	 */
	l.andi	r4,r2,0x1		// _PAGE_PRESENT bit
	l.sfne	r4,r0			// is pte present
	l.bnf	d_pte_not_present
	l.addi	r3,r0,0xffffe3fa	// PAGE_MASK | DTLB_UP_CONVERT_MASK (delay slot)
	/*
	 * fill DTLB TR register
	 */
	l.and	r4,r2,r3		// apply the mask
	// Determine number of DMMU sets
	l.mfspr	r6, r0, SPR_DMMUCFGR
	l.andi	r6, r6, SPR_DMMUCFGR_NTS
	l.srli	r6, r6, SPR_DMMUCFGR_NTS_OFF
	l.ori	r3, r0, 0x1
	l.sll	r3, r3, r6		// r3 = number DMMU sets DMMUCFGR
	l.addi	r6, r3, -1		// r6 = nsets mask
	l.and	r5, r5, r6		// calc offset:	& (NUM_TLB_ENTRIES-1)
	//NUM_TLB_ENTRIES
	l.mtspr	r5,r4,SPR_DTLBTR_BASE(0)
	/*
	 * fill DTLB MR register
	 */
	l.mfspr	r2,r0,SPR_EEAR_BASE
	l.addi	r3,r0,0xffffe000	// PAGE_MASK
	l.and	r4,r2,r3		// apply PAGE_MASK to EA (__PHX__ do we really need this?)
	l.ori	r4,r4,0x1		// set hardware valid bit: DTBL_MR entry
	l.mtspr	r5,r4,SPR_DTLBMR_BASE(0)

	EXCEPTION_LOAD_GPR2
	EXCEPTION_LOAD_GPR3
	EXCEPTION_LOAD_GPR4
	EXCEPTION_LOAD_GPR5
	EXCEPTION_LOAD_GPR6
	l.rfe
d_pmd_bad:
	l.nop	1
	EXCEPTION_LOAD_GPR2
	EXCEPTION_LOAD_GPR3
	EXCEPTION_LOAD_GPR4
	EXCEPTION_LOAD_GPR5
	EXCEPTION_LOAD_GPR6
	l.rfe
d_pmd_none:
d_pte_not_present:
	/* no usable pte: restore registers and dispatch to do_page_fault */
	EXCEPTION_LOAD_GPR2
	EXCEPTION_LOAD_GPR3
	EXCEPTION_LOAD_GPR4
	EXCEPTION_LOAD_GPR5
	EXCEPTION_LOAD_GPR6
	l.j	_dispatch_do_dpage_fault
	l.nop				// delay slot
| 1059 | |||
| 1060 | /* ==============================================[ ITLB miss handler ]=== */ | ||
/*
 * Runtime ITLB miss handler: walks the two-level page table for the
 * faulting instruction EA and, if a present pte is found, loads it
 * into the ITLB and returns with l.rfe; otherwise dispatches to
 * do_page_fault.  Mirrors dtlb_miss_handler above.
 */
ENTRY(itlb_miss_handler)
	EXCEPTION_STORE_GPR2
	EXCEPTION_STORE_GPR3
	EXCEPTION_STORE_GPR4
	EXCEPTION_STORE_GPR5
	EXCEPTION_STORE_GPR6
	/*
	 * get EA of the miss
	 */
	l.mfspr	r2,r0,SPR_EEAR_BASE

	/*
	 * pmd = (pmd_t *)(current_pgd + pgd_index(daddr));
	 *
	 */
	GET_CURRENT_PGD(r3,r5)		// r3 is current_pgd, r5 is temp
	l.srli	r4,r2,0x18		// >> PAGE_SHIFT + (PAGE_SHIFT - 2)
	l.slli	r4,r4,0x2		// to get address << 2
	l.add	r5,r4,r3		// r4 is pgd_index(daddr)
	/*
	 * if (pmd_none(*pmd))
	 *	goto pmd_none:
	 */
	tophys	(r4,r5)			// physical address of the pgd entry
	l.lwz	r3,0x0(r4)		// get *pmd value
	l.sfne	r3,r0
	l.bnf	i_pmd_none
	l.andi	r3,r3,0x1fff		// ~PAGE_MASK (delay slot)
	/*
	 * if (pmd_bad(*pmd))
	 *	pmd_clear(pmd)
	 *	goto pmd_bad:
	 */

//	l.sfeq	r3,r0			// check *pmd value
//	l.bf	i_pmd_good
	l.addi	r3,r0,0xffffe000	// PAGE_MASK
//	l.j	i_pmd_bad
//	l.sw	0x0(r4),r0		// clear pmd

i_pmd_good:
	/*
	 * pte = *pte_offset(pmd, iaddr);
	 *
	 */
	l.lwz	r4,0x0(r4)		// get **pmd value
	l.and	r4,r4,r3		// & PAGE_MASK -> pte table base
	l.srli	r5,r2,0xd		// >> PAGE_SHIFT, r2 == EEAR
	l.andi	r3,r5,0x7ff		// (1UL << PAGE_SHIFT - 2) - 1
	l.slli	r3,r3,0x2		// to get address << 2
	l.add	r3,r3,r4
	l.lwz	r2,0x0(r3)		// this is pte at last
	/*
	 * if (!pte_present(pte))
	 *
	 */
	l.andi	r4,r2,0x1		// _PAGE_PRESENT bit
	l.sfne	r4,r0			// is pte present
	l.bnf	i_pte_not_present
	l.addi	r3,r0,0xffffe03a	// PAGE_MASK | ITLB_UP_CONVERT_MASK (delay slot)
	/*
	 * fill ITLB TR register
	 */
	l.and	r4,r2,r3		// apply the mask
	l.andi	r3,r2,0x7c0		// _PAGE_EXEC | _PAGE_SRE | _PAGE_SWE |  _PAGE_URE | _PAGE_UWE
//	l.andi	r3,r2,0x400		// _PAGE_EXEC
	l.sfeq	r3,r0
	l.bf	itlb_tr_fill //_workaround
	/* NOTE(review): when the branch above is taken, the set-index
	 * masking below is skipped and r5 is used unmasked - confirm
	 * this path is unreachable for valid mappings */
	// Determine number of IMMU sets
	l.mfspr	r6, r0, SPR_IMMUCFGR
	l.andi	r6, r6, SPR_IMMUCFGR_NTS
	l.srli	r6, r6, SPR_IMMUCFGR_NTS_OFF
	l.ori	r3, r0, 0x1
	l.sll	r3, r3, r6		// r3 = number IMMU sets IMMUCFGR
	l.addi	r6, r3, -1		// r6 = nsets mask
	l.and	r5, r5, r6		// calc offset:	& (NUM_TLB_ENTRIES-1)

	/*
	 * __PHX__ :: fixme
	 * we should not just blindly set executable flags,
	 * but it does help with ping. the clean way would be to find out
	 * (and fix it) why stack doesn't have execution permissions
	 */

itlb_tr_fill_workaround:
	l.ori	r4,r4,0xc0		// | (SPR_ITLBTR_UXE | ITLBTR_SXE)
itlb_tr_fill:
	l.mtspr	r5,r4,SPR_ITLBTR_BASE(0)
	/*
	 * fill ITLB MR register
	 */
	l.mfspr	r2,r0,SPR_EEAR_BASE
	l.addi	r3,r0,0xffffe000	// PAGE_MASK
	l.and	r4,r2,r3		// apply PAGE_MASK to EA (__PHX__ do we really need this?)
	l.ori	r4,r4,0x1		// set hardware valid bit: ITLB_MR entry
	l.mtspr	r5,r4,SPR_ITLBMR_BASE(0)

	EXCEPTION_LOAD_GPR2
	EXCEPTION_LOAD_GPR3
	EXCEPTION_LOAD_GPR4
	EXCEPTION_LOAD_GPR5
	EXCEPTION_LOAD_GPR6
	l.rfe

i_pmd_bad:
	l.nop	1
	EXCEPTION_LOAD_GPR2
	EXCEPTION_LOAD_GPR3
	EXCEPTION_LOAD_GPR4
	EXCEPTION_LOAD_GPR5
	EXCEPTION_LOAD_GPR6
	l.rfe
i_pmd_none:
i_pte_not_present:
	/* no usable pte: restore registers and dispatch to do_page_fault */
	EXCEPTION_LOAD_GPR2
	EXCEPTION_LOAD_GPR3
	EXCEPTION_LOAD_GPR4
	EXCEPTION_LOAD_GPR5
	EXCEPTION_LOAD_GPR6
	l.j	_dispatch_do_ipage_fault
	l.nop				// delay slot
| 1182 | |||
| 1183 | /* ==============================================[ boot tlb handlers ]=== */ | ||
| 1184 | |||
| 1185 | |||
| 1186 | /* =================================================[ debugging aids ]=== */ | ||
| 1187 | |||
	.align 64
/* 64-byte scratch buffer the workaround below patches instructions into */
_immu_trampoline:
	.space 64
_immu_trampoline_top:

/* word offsets of the six instruction slots inside _immu_trampoline */
#define TRAMP_SLOT_0		(0x0)
#define TRAMP_SLOT_1		(0x4)
#define TRAMP_SLOT_2		(0x8)
#define TRAMP_SLOT_3		(0xc)
#define TRAMP_SLOT_4		(0x10)
#define TRAMP_SLOT_5		(0x14)
#define TRAMP_FRAME_SIZE	(0x18)

/*
 * Copies the two instructions around the faulting EA into the
 * trampoline buffer, rewriting any jump/branch among them so its
 * target is correct when executed from the trampoline, then points
 * EPC at the trampoline.  Returns via r9.
 * r2 = EEA on entry; clobbers r3-r6, r14, r21, r23.
 */
ENTRY(_immu_trampoline_workaround)
	// r2 EEA
	// r6 is physical EEA
	tophys(r6,r2)

	LOAD_SYMBOL_2_GPR(r5,_immu_trampoline)
	tophys	(r3,r5)			// r3 is trampoline (physical)

	/* pre-fill slots 0,1,4,5 with l.nop (opcode word 0x15000000) */
	LOAD_SYMBOL_2_GPR(r4,0x15000000)
	l.sw	TRAMP_SLOT_0(r3),r4
	l.sw	TRAMP_SLOT_1(r3),r4
	l.sw	TRAMP_SLOT_4(r3),r4
	l.sw	TRAMP_SLOT_5(r3),r4

	// EPC = EEA - 0x4
	l.lwz	r4,0x0(r6)		// load op @ EEA + 0x0 (fc address)
	l.sw	TRAMP_SLOT_3(r3),r4	// store it to _immu_trampoline_data
	l.lwz	r4,-0x4(r6)		// load op @ EEA - 0x4 (f8 address)
	l.sw	TRAMP_SLOT_2(r3),r4	// store it to _immu_trampoline_data

	/* dispatch on the opcode (top 6 bits) of the instruction at EEA-4 */
	l.srli	r5,r4,26		// get opcode
	l.sfeqi	r5,0			// l.j
	l.bf	0f
	l.sfeqi	r5,0x11			// l.jr
	l.bf	1f
	l.sfeqi	r5,1			// l.jal
	l.bf	2f
	l.sfeqi	r5,0x12			// l.jalr
	l.bf	3f
	l.sfeqi	r5,3			// l.bnf
	l.bf	4f
	l.sfeqi	r5,4			// l.bf
	l.bf	5f
99:
	l.nop
	l.j	99b			// should never happen
	l.nop	1

	// r2 is EEA
	// r3 is trampoline address (physical)
	// r4 is instruction
	// r6 is physical(EEA)
	//
	// r5

2:	// l.jal

	/* 19 20 aa aa	l.movhi r9,0xaaaa
	 * a9 29 bb bb	l.ori	r9,0xbbbb
	 *
	 * where 0xaaaabbbb is EEA + 0x4 shifted right 2
	 */

	l.addi	r6,r2,0x4		// this is 0xaaaabbbb

	// l.movhi r9,0xaaaa
	l.ori	r5,r0,0x1920		// 0x1920 == l.movhi r9
	l.sh	(TRAMP_SLOT_0+0x0)(r3),r5
	l.srli	r5,r6,16
	l.sh	(TRAMP_SLOT_0+0x2)(r3),r5

	// l.ori r9,0xbbbb
	l.ori	r5,r0,0xa929		// 0xa929 == l.ori r9
	l.sh	(TRAMP_SLOT_1+0x0)(r3),r5
	l.andi	r5,r6,0xffff
	l.sh	(TRAMP_SLOT_1+0x2)(r3),r5

	/* fallthrough, need to set up new jump offset */


0:	// l.j
	l.slli	r6,r4,6		// original offset shifted left 6 - 2
//	l.srli	r6,r6,6		// original offset shifted right 2

	l.slli	r4,r2,4		// old jump position: EEA shifted left 4
//	l.srli	r4,r4,6		// old jump position: shifted right 2

	l.addi	r5,r3,0xc	// new jump position (physical)
	l.slli	r5,r5,4		// new jump position: shifted left 4

	// calculate new jump offset
	// new_off = old_off + (old_jump - new_jump)

	l.sub	r5,r4,r5	// old_jump - new_jump
	l.add	r5,r6,r5	// orig_off + (old_jump - new_jump)
	l.srli	r5,r5,6		// new offset shifted right 2

	// r5 is new jump offset
	// l.j has opcode 0x0...
	l.sw	TRAMP_SLOT_2(r3),r5	// write it back

	l.j	trampoline_out
	l.nop				// delay slot

/* ----------------------------- */

3:	// l.jalr

	/* 19 20 aa aa	l.movhi r9,0xaaaa
	 * a9 29 bb bb	l.ori	r9,0xbbbb
	 *
	 * where 0xaaaabbbb is EEA + 0x4 shifted right 2
	 */

	l.addi	r6,r2,0x4		// this is 0xaaaabbbb

	// l.movhi r9,0xaaaa
	l.ori	r5,r0,0x1920		// 0x1920 == l.movhi r9
	l.sh	(TRAMP_SLOT_0+0x0)(r3),r5
	l.srli	r5,r6,16
	l.sh	(TRAMP_SLOT_0+0x2)(r3),r5

	// l.ori r9,0xbbbb
	l.ori	r5,r0,0xa929		// 0xa929 == l.ori r9
	l.sh	(TRAMP_SLOT_1+0x0)(r3),r5
	l.andi	r5,r6,0xffff
	l.sh	(TRAMP_SLOT_1+0x2)(r3),r5

	l.lhz	r5,(TRAMP_SLOT_2+0x0)(r3)	// load hi part of jump instruction
	l.andi	r5,r5,0x3ff		// clear out opcode part
	l.ori	r5,r5,0x4400		// opcode changed from l.jalr -> l.jr
	l.sh	(TRAMP_SLOT_2+0x0)(r3),r5	// write it back

	/* fallthrough */

1:	// l.jr
	l.j	trampoline_out
	l.nop				// delay slot

/* ----------------------------- */

4:	// l.bnf
5:	// l.bf
	l.slli	r6,r4,6		// original offset shifted left 6 - 2
//	l.srli	r6,r6,6		// original offset shifted right 2

	l.slli	r4,r2,4		// old jump position: EEA shifted left 4
//	l.srli	r4,r4,6		// old jump position: shifted right 2

	l.addi	r5,r3,0xc	// new jump position (physical)
	l.slli	r5,r5,4		// new jump position: shifted left 4

	// calculate new jump offset
	// new_off = old_off + (old_jump - new_jump)

	l.add	r6,r6,r4	// (orig_off + old_jump)
	l.sub	r6,r6,r5	// (orig_off + old_jump) - new_jump
	l.srli	r6,r6,6		// new offset shifted right 2

	// r6 is new jump offset
	l.lwz	r4,(TRAMP_SLOT_2+0x0)(r3)	// load jump instruction
	l.srli	r4,r4,16
	l.andi	r4,r4,0xfc00		// get opcode part
	l.slli	r4,r4,16
	l.or	r6,r4,r6		// l.b(n)f new offset
	l.sw	TRAMP_SLOT_2(r3),r6	// write it back

	/* we need to add l.j to EEA + 0x8 */
	tophys	(r4,r2)			// may not be needed (due to shifts down)
	l.addi	r4,r4,(0x8 - 0x8)	// jump target = r2 + 0x8 (compensate for 0x8)
					// jump position = r5 + 0x8 (0x8 compensated)
	l.sub	r4,r4,r5		// jump offset = target - new_position + 0x8

	l.slli	r4,r4,4			// the amount of info in immediate of jump
	l.srli	r4,r4,6			// jump instruction with offset
	l.sw	TRAMP_SLOT_4(r3),r4	// write it to 4th slot

	/* fallthrough */

trampoline_out:
	// set up new EPC to point to our trampoline code
	LOAD_SYMBOL_2_GPR(r5,_immu_trampoline)
	l.mtspr	r0,r5,SPR_EPCR_BASE

	// immu_trampoline is (4x) CACHE_LINE aligned
	// and only 6 instructions long,
	// so we need to invalidate only 2 lines

	/* Establish cache block size
	   If BS=0, 16;
	   If BS=1, 32;
	   r14 contains block size
	*/
	l.mfspr	r21,r0,SPR_ICCFGR
	l.andi	r21,r21,SPR_ICCFGR_CBS
	l.srli	r21,r21,7
	l.ori	r23,r0,16
	l.sll	r14,r23,r21

	/* invalidate the two IC lines covering the trampoline */
	l.mtspr	r0,r5,SPR_ICBIR
	l.add	r5,r5,r14
	l.mtspr	r0,r5,SPR_ICBIR

	l.jr	r9
	l.nop				// delay slot
| 1396 | |||
| 1397 | |||
| 1398 | /* | ||
| 1399 | * DSCR: prints a string referenced by r3. | ||
| 1400 | * | ||
| 1401 | * PRMS: r3 - address of the first character of null | ||
| 1402 | * terminated string to be printed | ||
| 1403 | * | ||
| 1404 | * PREQ: UART at UART_BASE_ADD has to be initialized | ||
| 1405 | * | ||
| 1406 | * POST: caller should be aware that r3, r9 are changed | ||
| 1407 | */ | ||
/*
 * Polled-UART string output for early boot debugging.
 * r3 = address of NUL-terminated string; each byte is pushed out the
 * 8250-style UART at UART_BASE_ADD.  r4-r7 are preserved via the
 * EMERGENCY_PRINT_* macros; r3 and r9 are clobbered (see header above).
 */
ENTRY(_emergency_print)
	EMERGENCY_PRINT_STORE_GPR4
	EMERGENCY_PRINT_STORE_GPR5
	EMERGENCY_PRINT_STORE_GPR6
	EMERGENCY_PRINT_STORE_GPR7
2:
	l.lbz	r7,0(r3)		// fetch next character
	l.sfeq	r7,r0			// NUL terminator reached?
	l.bf	9f			// yes -> restore regs and return
	l.nop

	// putc:
	l.movhi	r4,hi(UART_BASE_ADD)

	// poll LSR (reg offset 5) until THRE (0x20) is set
	l.addi	r6,r0,0x20
1:	l.lbz	r5,5(r4)
	l.andi	r5,r5,0x20
	l.sfeq	r5,r6
	l.bnf	1b
	l.nop

	l.sb	0(r4),r7		// write character to THR (offset 0)

	// wait until THRE|TEMT (0x60): transmitter completely idle
	l.addi	r6,r0,0x60
1:	l.lbz	r5,5(r4)
	l.andi	r5,r5,0x60
	l.sfeq	r5,r6
	l.bnf	1b
	l.nop

	/* next character */
	l.j	2b
	l.addi	r3,r3,0x1		// delay slot: advance string pointer

9:
	EMERGENCY_PRINT_LOAD_GPR7
	EMERGENCY_PRINT_LOAD_GPR6
	EMERGENCY_PRINT_LOAD_GPR5
	EMERGENCY_PRINT_LOAD_GPR4
	l.jr	r9
	l.nop
| 1449 | |||
/*
 * Print the value in r3 as hexadecimal on the early-boot polled UART.
 * Emits nibbles most-significant first, skipping leading zeros; the
 * skip loop stops unconditionally once r8 == 4, so the last two
 * nibbles are always printed (a value of 0 prints as "00").
 * PREQ: UART at UART_BASE_ADD initialized.  r4-r8 preserved via the
 * EMERGENCY_PRINT_* macros; r9 holds the return address.
 */
ENTRY(_emergency_print_nr)
	EMERGENCY_PRINT_STORE_GPR4
	EMERGENCY_PRINT_STORE_GPR5
	EMERGENCY_PRINT_STORE_GPR6
	EMERGENCY_PRINT_STORE_GPR7
	EMERGENCY_PRINT_STORE_GPR8

	l.addi	r8,r0,32		// shift register

1:	/* remove leading zeros */
	l.addi	r8,r8,-0x4		// step to next (less significant) nibble
	l.srl	r7,r3,r8
	l.andi	r7,r7,0xf		// r7 = current nibble

	/* don't skip the last zero if number == 0x0 */
	l.sfeqi	r8,0x4
	l.bf	2f
	l.nop

	l.sfeq	r7,r0			// nibble still zero? keep skipping
	l.bf	1b
	l.nop

2:
	l.srl	r7,r3,r8

	l.andi	r7,r7,0xf
	l.sflts	r8,r0			// shift count went negative -> done
	l.bf	9f
					// NOTE(review): delay slot of l.bf is
					// the l.sfgtui below; executed either
					// way, harmless since flag is unused
					// past 9f -- confirm intent.
	l.sfgtui r7,0x9			// digit > 9 needs letter a-f
	l.bnf	8f
	l.nop
	l.addi	r7,r7,0x27		// 0x27 + 0x30 below = 'a' - 10

8:
	l.addi	r7,r7,0x30		// convert digit to ASCII
	// putc:
	l.movhi	r4,hi(UART_BASE_ADD)

	// poll LSR (offset 5) until THRE (0x20) is set
	l.addi	r6,r0,0x20
1:	l.lbz	r5,5(r4)
	l.andi	r5,r5,0x20
	l.sfeq	r5,r6
	l.bnf	1b
	l.nop

	l.sb	0(r4),r7		// emit the hex digit

	// wait until THRE|TEMT (0x60): transmitter completely idle
	l.addi	r6,r0,0x60
1:	l.lbz	r5,5(r4)
	l.andi	r5,r5,0x60
	l.sfeq	r5,r6
	l.bnf	1b
	l.nop

	/* next character */
	l.j	2b
	l.addi	r8,r8,-0x4		// delay slot: advance to next nibble

9:
	EMERGENCY_PRINT_LOAD_GPR8
	EMERGENCY_PRINT_LOAD_GPR7
	EMERGENCY_PRINT_LOAD_GPR6
	EMERGENCY_PRINT_LOAD_GPR5
	EMERGENCY_PRINT_LOAD_GPR4
	l.jr	r9
	l.nop
| 1518 | |||
| 1519 | |||
| 1520 | /* | ||
| 1521 | * This should be used for debugging only. | ||
| 1522 | * It messes up the Linux early serial output | ||
| 1523 | * somehow, so use it sparingly and essentially | ||
| 1524 | * only if you need to debug something that goes wrong | ||
| 1525 | * before Linux gets the early serial going. | ||
| 1526 | * | ||
| 1527 | * Furthermore, you'll have to make sure you set the | ||
| 1528 | * UART_DIVISOR correctly according to the system | ||
| 1529 | * clock rate. | ||
| 1530 | * | ||
| 1531 | * | ||
| 1532 | */ | ||
| 1533 | |||
| 1534 | |||
| 1535 | |||
/*
 * Early-console UART parameters.  SYS_CLK must match the actual board
 * clock rate or the computed divisor (and thus the baud rate) is wrong.
 */
#define SYS_CLK            20000000
//#define SYS_CLK            1843200
#define OR32_CONSOLE_BAUD  115200
#define UART_DIVISOR       SYS_CLK/(16*OR32_CONSOLE_BAUD)
| 1540 | |||
/*
 * Minimal 8250/16550-style UART init for the early debug console:
 * FIFOs enabled and cleared, interrupts masked, 8N1 framing, divisor
 * latches programmed from UART_DIVISOR.  Clobbers r3-r5; returns via r9.
 */
ENTRY(_early_uart_init)
	l.movhi	r3,hi(UART_BASE_ADD)

	l.addi	r4,r0,0x7
	l.sb	0x2(r3),r4		// FCR: enable + reset RX/TX FIFOs

	l.addi	r4,r0,0x0
	l.sb	0x1(r3),r4		// IER: mask all UART interrupts

	l.addi	r4,r0,0x3
	l.sb	0x3(r3),r4		// LCR: 8 data bits, no parity, 1 stop

	l.lbz	r5,3(r3)		// save current LCR
	l.ori	r4,r5,0x80
	l.sb	0x3(r3),r4		// set DLAB to expose divisor latches
	l.addi	r4,r0,((UART_DIVISOR>>8) & 0x000000ff)
	l.sb	UART_DLM(r3),r4		// divisor high byte
	l.addi	r4,r0,((UART_DIVISOR) & 0x000000ff)
	l.sb	UART_DLL(r3),r4		// divisor low byte
	l.sb	0x3(r3),r5		// restore LCR (clears DLAB)

	l.jr	r9
	l.nop
| 1564 | |||
/* NUL-terminated messages emitted through _emergency_print */
_string_copying_linux:
	.string "\n\n\n\n\n\rCopying Linux... \0"

_string_ok_booting:
	.string "Ok, booting the kernel.\n\r\0"

_string_unhandled_exception:
	.string "\n\rRunarunaround: Unhandled exception 0x\0"

_string_epc_prefix:
	.string ": EPC=0x\0"

_string_nl:
	.string "\n\r\0"

	.global	_string_esr_irq_bug
_string_esr_irq_bug:
	.string "\n\rESR external interrupt bug, for details look into entry.S\n\r\0"
| 1583 | |||
| 1584 | |||
| 1585 | |||
| 1586 | /* ========================================[ page aligned structures ]=== */ | ||
| 1587 | |||
| 1588 | /* | ||
| 1589 | * .data section should be page aligned | ||
| 1590 | * (look into arch/or32/kernel/vmlinux.lds) | ||
| 1591 | */ | ||
	.section .data,"aw"
	.align	8192			// page-align the blocks below
					// (8 KiB pages; see vmlinux.lds)
	.global empty_zero_page
empty_zero_page:
	.space	8192			// all-zero page exported to the VM layer

	.global swapper_pg_dir
swapper_pg_dir:
	.space	8192			// kernel's initial page directory

	.global _unhandled_stack
_unhandled_stack:
	.space	8192			// scratch stack used while reporting
					// unhandled exceptions
_unhandled_stack_top:
| 1606 | |||
| 1607 | /* ============================================================[ EOF ]=== */ | ||
diff --git a/arch/openrisc/kernel/init_task.c b/arch/openrisc/kernel/init_task.c new file mode 100644 index 000000000000..45744a384927 --- /dev/null +++ b/arch/openrisc/kernel/init_task.c | |||
| @@ -0,0 +1,41 @@ | |||
| 1 | /* | ||
| 2 | * OpenRISC init_task.c | ||
| 3 | * | ||
| 4 | * Linux architectural port borrowing liberally from similar works of | ||
| 5 | * others. All original copyrights apply as per the original source | ||
| 6 | * declaration. | ||
| 7 | * | ||
| 8 | * Modifications for the OpenRISC architecture: | ||
| 9 | * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> | ||
| 10 | * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> | ||
| 11 | * | ||
| 12 | * This program is free software; you can redistribute it and/or | ||
| 13 | * modify it under the terms of the GNU General Public License | ||
| 14 | * as published by the Free Software Foundation; either version | ||
| 15 | * 2 of the License, or (at your option) any later version. | ||
| 16 | */ | ||
| 17 | |||
| 18 | #include <linux/init_task.h> | ||
| 19 | #include <linux/mqueue.h> | ||
| 20 | |||
/* Shared signal state and signal-handler table for the initial task. */
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);

/*
 * Initial thread structure.
 *
 * We need to make sure that this is THREAD_SIZE aligned due to the
 * way process stacks are handled. This is done by having a special
 * "init_task" linker map entry..
 */
union thread_union init_thread_union __init_task_data = {
	INIT_THREAD_INFO(init_task)
};

/*
 * Initial task structure.
 *
 * All other task structs will be allocated on slabs in fork.c
 */
struct task_struct init_task = INIT_TASK(init_task);
/* NOTE(review): EXPORT_SYMBOL normally comes from <linux/module.h> /
 * <linux/export.h>; presumably pulled in indirectly here -- confirm. */
EXPORT_SYMBOL(init_task);
diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c new file mode 100644 index 000000000000..1422f747f52b --- /dev/null +++ b/arch/openrisc/kernel/setup.c | |||
| @@ -0,0 +1,381 @@ | |||
| 1 | /* | ||
| 2 | * OpenRISC setup.c | ||
| 3 | * | ||
| 4 | * Linux architectural port borrowing liberally from similar works of | ||
| 5 | * others. All original copyrights apply as per the original source | ||
| 6 | * declaration. | ||
| 7 | * | ||
| 8 | * Modifications for the OpenRISC architecture: | ||
| 9 | * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> | ||
| 10 | * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> | ||
| 11 | * | ||
| 12 | * This program is free software; you can redistribute it and/or | ||
| 13 | * modify it under the terms of the GNU General Public License | ||
| 14 | * as published by the Free Software Foundation; either version | ||
| 15 | * 2 of the License, or (at your option) any later version. | ||
| 16 | * | ||
| 17 | * This file handles the architecture-dependent parts of initialization | ||
| 18 | */ | ||
| 19 | |||
| 20 | #include <linux/errno.h> | ||
| 21 | #include <linux/sched.h> | ||
| 22 | #include <linux/kernel.h> | ||
| 23 | #include <linux/mm.h> | ||
| 24 | #include <linux/stddef.h> | ||
| 25 | #include <linux/unistd.h> | ||
| 26 | #include <linux/ptrace.h> | ||
| 27 | #include <linux/slab.h> | ||
| 28 | #include <linux/tty.h> | ||
| 29 | #include <linux/ioport.h> | ||
| 30 | #include <linux/delay.h> | ||
| 31 | #include <linux/console.h> | ||
| 32 | #include <linux/init.h> | ||
| 33 | #include <linux/bootmem.h> | ||
| 34 | #include <linux/seq_file.h> | ||
| 35 | #include <linux/serial.h> | ||
| 36 | #include <linux/initrd.h> | ||
| 37 | #include <linux/of_fdt.h> | ||
| 38 | #include <linux/of.h> | ||
| 39 | #include <linux/memblock.h> | ||
| 40 | #include <linux/device.h> | ||
| 41 | #include <linux/of_platform.h> | ||
| 42 | |||
| 43 | #include <asm/segment.h> | ||
| 44 | #include <asm/system.h> | ||
| 45 | #include <asm/pgtable.h> | ||
| 46 | #include <asm/types.h> | ||
| 47 | #include <asm/setup.h> | ||
| 48 | #include <asm/io.h> | ||
| 49 | #include <asm/cpuinfo.h> | ||
| 50 | #include <asm/delay.h> | ||
| 51 | |||
| 52 | #include "vmlinux.h" | ||
| 53 | |||
| 54 | char __initdata cmd_line[COMMAND_LINE_SIZE] = CONFIG_CMDLINE; | ||
| 55 | |||
/*
 * Locate usable RAM via memblock and hand it over to the bootmem
 * allocator.  Returns the PFN one past the end of RAM; the caller
 * uses it as max_low_pfn.
 */
static unsigned long __init setup_memory(void)
{
	unsigned long bootmap_size;
	unsigned long ram_start_pfn;
	unsigned long free_ram_start_pfn;
	unsigned long ram_end_pfn;
	phys_addr_t memory_start, memory_end;
	struct memblock_region *region;

	memory_end = memory_start = 0;

	/* Find main memory where is the kernel */
	/* NOTE(review): with more than one memblock region only the last
	 * one survives the loop -- assumes a single contiguous bank. */
	for_each_memblock(memory, region) {
		memory_start = region->base;
		memory_end = region->base + region->size;
		/* NOTE(review): %x with phys_addr_t misprints if
		 * phys_addr_t is 64-bit on this config -- confirm. */
		printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__,
		       memory_start, memory_end);
	}

	if (!memory_end) {
		panic("No memory!");
	}

	ram_start_pfn = PFN_UP(memory_start);
	/* free_ram_start_pfn is first page after kernel */
	free_ram_start_pfn = PFN_UP(__pa(&_end));
	ram_end_pfn = PFN_DOWN(memblock_end_of_DRAM());

	max_pfn = ram_end_pfn;

	/*
	 * initialize the boot-time allocator (with low memory only).
	 *
	 * This makes the memory from the end of the kernel to the end of
	 * RAM usable.
	 * init_bootmem sets the global values min_low_pfn, max_low_pfn.
	 *
	 * NOTE(review): the page count passed is ram_end_pfn -
	 * ram_start_pfn although the map starts at free_ram_start_pfn;
	 * looks intentional (bitmap covers all of RAM) -- confirm.
	 */
	bootmap_size = init_bootmem(free_ram_start_pfn,
				    ram_end_pfn - ram_start_pfn);
	free_bootmem(PFN_PHYS(free_ram_start_pfn),
		     (ram_end_pfn - free_ram_start_pfn) << PAGE_SHIFT);
	/* the bootmem bitmap itself lives in the freed area: reserve it */
	reserve_bootmem(PFN_PHYS(free_ram_start_pfn), bootmap_size,
			BOOTMEM_DEFAULT);

	/* carry regions reserved during the memblock phase into bootmem */
	for_each_memblock(reserved, region) {
		printk(KERN_INFO "Reserved - 0x%08x-0x%08x\n",
		       (u32) region->base, (u32) region->size);
		reserve_bootmem(region->base, region->size, BOOTMEM_DEFAULT);
	}

	return ram_end_pfn;
}
| 108 | |||
| 109 | struct cpuinfo cpuinfo; | ||
| 110 | |||
| 111 | static void print_cpuinfo(void) | ||
| 112 | { | ||
| 113 | unsigned long upr = mfspr(SPR_UPR); | ||
| 114 | unsigned long vr = mfspr(SPR_VR); | ||
| 115 | unsigned int version; | ||
| 116 | unsigned int revision; | ||
| 117 | |||
| 118 | version = (vr & SPR_VR_VER) >> 24; | ||
| 119 | revision = (vr & SPR_VR_REV); | ||
| 120 | |||
| 121 | printk(KERN_INFO "CPU: OpenRISC-%x (revision %d) @%d MHz\n", | ||
| 122 | version, revision, cpuinfo.clock_frequency / 1000000); | ||
| 123 | |||
| 124 | if (!(upr & SPR_UPR_UP)) { | ||
| 125 | printk(KERN_INFO | ||
| 126 | "-- no UPR register... unable to detect configuration\n"); | ||
| 127 | return; | ||
| 128 | } | ||
| 129 | |||
| 130 | if (upr & SPR_UPR_DCP) | ||
| 131 | printk(KERN_INFO | ||
| 132 | "-- dcache: %4d bytes total, %2d bytes/line, %d way(s)\n", | ||
| 133 | cpuinfo.dcache_size, cpuinfo.dcache_block_size, 1); | ||
| 134 | else | ||
| 135 | printk(KERN_INFO "-- dcache disabled\n"); | ||
| 136 | if (upr & SPR_UPR_ICP) | ||
| 137 | printk(KERN_INFO | ||
| 138 | "-- icache: %4d bytes total, %2d bytes/line, %d way(s)\n", | ||
| 139 | cpuinfo.icache_size, cpuinfo.icache_block_size, 1); | ||
| 140 | else | ||
| 141 | printk(KERN_INFO "-- icache disabled\n"); | ||
| 142 | |||
| 143 | if (upr & SPR_UPR_DMP) | ||
| 144 | printk(KERN_INFO "-- dmmu: %4d entries, %lu way(s)\n", | ||
| 145 | 1 << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> 2), | ||
| 146 | 1 + (mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTW)); | ||
| 147 | if (upr & SPR_UPR_IMP) | ||
| 148 | printk(KERN_INFO "-- immu: %4d entries, %lu way(s)\n", | ||
| 149 | 1 << ((mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTS) >> 2), | ||
| 150 | 1 + (mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTW)); | ||
| 151 | |||
| 152 | printk(KERN_INFO "-- additional features:\n"); | ||
| 153 | if (upr & SPR_UPR_DUP) | ||
| 154 | printk(KERN_INFO "-- debug unit\n"); | ||
| 155 | if (upr & SPR_UPR_PCUP) | ||
| 156 | printk(KERN_INFO "-- performance counters\n"); | ||
| 157 | if (upr & SPR_UPR_PMP) | ||
| 158 | printk(KERN_INFO "-- power management\n"); | ||
| 159 | if (upr & SPR_UPR_PICP) | ||
| 160 | printk(KERN_INFO "-- PIC\n"); | ||
| 161 | if (upr & SPR_UPR_TTP) | ||
| 162 | printk(KERN_INFO "-- timer\n"); | ||
| 163 | if (upr & SPR_UPR_CUP) | ||
| 164 | printk(KERN_INFO "-- custom unit(s)\n"); | ||
| 165 | } | ||
| 166 | |||
| 167 | void __init setup_cpuinfo(void) | ||
| 168 | { | ||
| 169 | struct device_node *cpu; | ||
| 170 | unsigned long iccfgr, dccfgr; | ||
| 171 | unsigned long cache_set_size, cache_ways; | ||
| 172 | |||
| 173 | cpu = of_find_compatible_node(NULL, NULL, "opencores,or1200-rtlsvn481"); | ||
| 174 | if (!cpu) | ||
| 175 | panic("No compatible CPU found in device tree...\n"); | ||
| 176 | |||
| 177 | iccfgr = mfspr(SPR_ICCFGR); | ||
| 178 | cache_ways = 1 << (iccfgr & SPR_ICCFGR_NCW); | ||
| 179 | cache_set_size = 1 << ((iccfgr & SPR_ICCFGR_NCS) >> 3); | ||
| 180 | cpuinfo.icache_block_size = 16 << ((iccfgr & SPR_ICCFGR_CBS) >> 7); | ||
| 181 | cpuinfo.icache_size = | ||
| 182 | cache_set_size * cache_ways * cpuinfo.icache_block_size; | ||
| 183 | |||
| 184 | dccfgr = mfspr(SPR_DCCFGR); | ||
| 185 | cache_ways = 1 << (dccfgr & SPR_DCCFGR_NCW); | ||
| 186 | cache_set_size = 1 << ((dccfgr & SPR_DCCFGR_NCS) >> 3); | ||
| 187 | cpuinfo.dcache_block_size = 16 << ((dccfgr & SPR_DCCFGR_CBS) >> 7); | ||
| 188 | cpuinfo.dcache_size = | ||
| 189 | cache_set_size * cache_ways * cpuinfo.dcache_block_size; | ||
| 190 | |||
| 191 | if (of_property_read_u32(cpu, "clock-frequency", | ||
| 192 | &cpuinfo.clock_frequency)) { | ||
| 193 | printk(KERN_WARNING | ||
| 194 | "Device tree missing CPU 'clock-frequency' parameter." | ||
| 195 | "Assuming frequency 25MHZ" | ||
| 196 | "This is probably not what you want."); | ||
| 197 | } | ||
| 198 | |||
| 199 | of_node_put(cpu); | ||
| 200 | |||
| 201 | print_cpuinfo(); | ||
| 202 | } | ||
| 203 | |||
| 204 | /** | ||
| 205 | * or32_early_setup | ||
| 206 | * | ||
| 207 | * Handles the pointer to the device tree that this kernel is to use | ||
| 208 | * for establishing the available platform devices. | ||
| 209 | * | ||
| 210 | * For now, this is limited to using the built-in device tree. In the future, | ||
| 211 | * it is intended that this function will take a pointer to the device tree | ||
| 212 | * that is potentially built-in, but potentially also passed in by the | ||
| 213 | * bootloader, or discovered by some equally clever means... | ||
| 214 | */ | ||
| 215 | |||
void __init or32_early_setup(void)
{
	/* Only the compiled-in device tree is supported for now; see the
	 * comment block above for the intended future extension. */
	early_init_devtree(__dtb_start);

	printk(KERN_INFO "Compiled-in FDT at 0x%p\n", __dtb_start);
}
| 223 | |||
/* Instantiate platform devices from the device tree; runs as a
 * device_initcall, i.e. after the of_node tree has been unflattened. */
static int __init openrisc_device_probe(void)
{
	of_platform_populate(NULL, NULL, NULL, NULL);

	return 0;
}

device_initcall(openrisc_device_probe);
| 232 | |||
/*
 * Extract a 'width'-bit wide field starting at bit 'bit_nr' from 'reg'.
 *
 * Bug fix: the mask was written as (0 << width), which is always zero,
 * so the function returned 0 for every input.  The correct mask for a
 * width-bit field is (1UL << width) - 1.
 */
static inline unsigned long extract_value_bits(unsigned long reg,
					       short bit_nr, short width)
{
	return (reg >> bit_nr) & ((1UL << width) - 1);
}
| 238 | |||
/*
 * Extract the field selected by 'mask' from 'reg': shift both right
 * until the mask's lowest set bit reaches bit 0, then apply the mask.
 * 'mask' must be non-zero, otherwise the loop never terminates.
 */
static inline unsigned long extract_value(unsigned long reg, unsigned long mask)
{
	for (; !(mask & 0x1); mask >>= 1)
		reg >>= 1;
	return reg & mask;
}
| 247 | |||
| 248 | void __init detect_unit_config(unsigned long upr, unsigned long mask, | ||
| 249 | char *text, void (*func) (void)) | ||
| 250 | { | ||
| 251 | if (text != NULL) | ||
| 252 | printk("%s", text); | ||
| 253 | |||
| 254 | if (upr & mask) { | ||
| 255 | if (func != NULL) | ||
| 256 | func(); | ||
| 257 | else | ||
| 258 | printk("present\n"); | ||
| 259 | } else | ||
| 260 | printk("not present\n"); | ||
| 261 | } | ||
| 262 | |||
| 263 | /* | ||
| 264 | * calibrate_delay | ||
| 265 | * | ||
| 266 | * Lightweight calibrate_delay implementation that calculates loops_per_jiffy | ||
| 267 | * from the clock frequency passed in via the device tree | ||
| 268 | * | ||
| 269 | */ | ||
| 270 | |||
void __cpuinit calibrate_delay(void)
{
	const int *val;
	struct device_node *cpu = NULL;
	cpu = of_find_compatible_node(NULL, NULL, "opencores,or1200-rtlsvn481");
	/* A missing CPU node yields cpu == NULL; the of_* helpers then
	 * return NULL for the property, so it is caught by the panic. */
	val = of_get_property(cpu, "clock-frequency", NULL);
	if (!val)
		panic("no cpu 'clock-frequency' parameter in device tree");
	/* One delay-loop iteration per clock tick: lpj = f_clk / HZ. */
	loops_per_jiffy = *val / HZ;
	pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n",
		loops_per_jiffy / (500000 / HZ),
		(loops_per_jiffy / (5000 / HZ)) % 100, loops_per_jiffy);
}
| 284 | |||
/*
 * Architecture-dependent boot-time setup: unflatten the device tree,
 * probe the CPU, describe the kernel image to init_mm, initialize the
 * bootmem allocator and bring up paging.  *cmdline_p is pointed at the
 * built-in command line.
 */
void __init setup_arch(char **cmdline_p)
{
	/* NOTE(review): this local shadows the global max_low_pfn (set
	 * inside setup_memory() via init_bootmem) and is never read
	 * afterwards -- confirm whether the global was intended. */
	unsigned long max_low_pfn;

	unflatten_device_tree();

	setup_cpuinfo();

	/* process 1's initial memory region is the kernel code/data */
	init_mm.start_code = (unsigned long)&_stext;
	init_mm.end_code = (unsigned long)&_etext;
	init_mm.end_data = (unsigned long)&_edata;
	init_mm.brk = (unsigned long)&_end;

#ifdef CONFIG_BLK_DEV_INITRD
	/* built-in initrd: an empty section means no initrd present */
	initrd_start = (unsigned long)&__initrd_start;
	initrd_end = (unsigned long)&__initrd_end;
	if (initrd_start == initrd_end) {
		initrd_start = 0;
		initrd_end = 0;
	}
	initrd_below_start_ok = 1;
#endif

	/* setup bootmem allocator */
	max_low_pfn = setup_memory();

	/* paging_init() sets up the MMU and marks all pages as reserved */
	paging_init();

#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
	if (!conswitchp)
		conswitchp = &dummy_con;
#endif

	*cmdline_p = cmd_line;

	printk(KERN_INFO "OpenRISC Linux -- http://openrisc.net\n");
}
| 324 | |||
/*
 * /proc/cpuinfo 'show' callback.  There is a single CPU; the values
 * come from the VR SPR, the MMU configuration SPRs and the global
 * cpuinfo snapshot filled in by setup_cpuinfo().
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	unsigned long vr;
	int version, revision;

	vr = mfspr(SPR_VR);
	version = (vr & SPR_VR_VER) >> 24;	/* VR version field */
	revision = vr & SPR_VR_REV;		/* VR revision field */

	/* NOTE(review): "frequency" prints loops_per_jiffy * HZ (an
	 * unsigned long) through %ld -- confirm the format specifier. */
	return seq_printf(m,
			  "cpu\t\t: OpenRISC-%x\n"
			  "revision\t: %d\n"
			  "frequency\t: %ld\n"
			  "dcache size\t: %d bytes\n"
			  "dcache block size\t: %d bytes\n"
			  "icache size\t: %d bytes\n"
			  "icache block size\t: %d bytes\n"
			  "immu\t\t: %d entries, %lu ways\n"
			  "dmmu\t\t: %d entries, %lu ways\n"
			  "bogomips\t: %lu.%02lu\n",
			  version,
			  revision,
			  loops_per_jiffy * HZ,
			  cpuinfo.dcache_size,
			  cpuinfo.dcache_block_size,
			  cpuinfo.icache_size,
			  cpuinfo.icache_block_size,
			  1 << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> 2),
			  1 + (mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTW),
			  1 << ((mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTS) >> 2),
			  1 + (mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTW),
			  (loops_per_jiffy * HZ) / 500000,
			  ((loops_per_jiffy * HZ) / 5000) % 100);
}
| 359 | |||
| 360 | static void *c_start(struct seq_file *m, loff_t * pos) | ||
| 361 | { | ||
| 362 | /* We only have one CPU... */ | ||
| 363 | return *pos < 1 ? (void *)1 : NULL; | ||
| 364 | } | ||
| 365 | |||
| 366 | static void *c_next(struct seq_file *m, void *v, loff_t * pos) | ||
| 367 | { | ||
| 368 | ++*pos; | ||
| 369 | return NULL; | ||
| 370 | } | ||
| 371 | |||
/* seq_file 'stop': nothing was allocated in c_start, nothing to free. */
static void c_stop(struct seq_file *m, void *v)
{
}
| 375 | |||
/* seq_file operations backing /proc/cpuinfo for this architecture. */
const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = show_cpuinfo,
};
