aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorStefan Kristiansson <stefan.kristiansson@saunalahti.fi>2014-05-17 12:18:58 -0400
committerStafford Horne <shorne@gmail.com>2017-02-06 07:50:43 -0500
commit8c9b7db0de3d64c9a6fcd12622636d4aa6a8c30c (patch)
tree2b764ae08bee406a851d79776c50b997e464c6dc
parentc2dc72437a5504bf8f4343ed83ae745afa388522 (diff)
openrisc: head: refactor out tlb flush into its own function
This brings it inline with the other setup operations done, like the cache enables _ic_enable and _dc_enable. Also, this is going to make it easier to initialize additional CPUs when SMP is introduced. Signed-off-by: Stefan Kristiansson <stefan.kristiansson@saunalahti.fi> [shorne@gmail.com: Added commit body] Signed-off-by: Stafford Horne <shorne@gmail.com>
-rw-r--r--arch/openrisc/kernel/head.S38
1 files changed, 22 insertions, 16 deletions
diff --git a/arch/openrisc/kernel/head.S b/arch/openrisc/kernel/head.S
index 63ba2d96a860..a22f1fc40a6c 100644
--- a/arch/openrisc/kernel/head.S
+++ b/arch/openrisc/kernel/head.S
@@ -522,22 +522,8 @@ enable_dc:
522 l.nop 522 l.nop
523 523
524flush_tlb: 524flush_tlb:
525 /* 525 l.jal _flush_tlb
526 * I N V A L I D A T E T L B e n t r i e s 526 l.nop
527 */
528 LOAD_SYMBOL_2_GPR(r5,SPR_DTLBMR_BASE(0))
529 LOAD_SYMBOL_2_GPR(r6,SPR_ITLBMR_BASE(0))
530 l.addi r7,r0,128 /* Maximum number of sets */
5311:
532 l.mtspr r5,r0,0x0
533 l.mtspr r6,r0,0x0
534
535 l.addi r5,r5,1
536 l.addi r6,r6,1
537 l.sfeq r7,r0
538 l.bnf 1b
539 l.addi r7,r7,-1
540
541 527
542/* The MMU needs to be enabled before or32_early_setup is called */ 528/* The MMU needs to be enabled before or32_early_setup is called */
543 529
@@ -629,6 +615,26 @@ jump_start_kernel:
629 l.jr r30 615 l.jr r30
630 l.nop 616 l.nop
631 617
618_flush_tlb:
619 /*
620 * I N V A L I D A T E T L B e n t r i e s
621 */
622 LOAD_SYMBOL_2_GPR(r5,SPR_DTLBMR_BASE(0))
623 LOAD_SYMBOL_2_GPR(r6,SPR_ITLBMR_BASE(0))
624 l.addi r7,r0,128 /* Maximum number of sets */
6251:
626 l.mtspr r5,r0,0x0
627 l.mtspr r6,r0,0x0
628
629 l.addi r5,r5,1
630 l.addi r6,r6,1
631 l.sfeq r7,r0
632 l.bnf 1b
633 l.addi r7,r7,-1
634
635 l.jr r9
636 l.nop
637
632/* ========================================[ cache ]=== */ 638/* ========================================[ cache ]=== */
633 639
634 /* aligment here so we don't change memory offsets with 640 /* aligment here so we don't change memory offsets with