aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/kernel/head_44x.S
diff options
context:
space:
mode:
authorDave Kleikamp <shaggy@linux.vnet.ibm.com>2010-03-05 05:43:12 -0500
committerJosh Boyer <jwboyer@linux.vnet.ibm.com>2010-05-05 09:11:10 -0400
commite7f75ad01d590243904c2d95ab47e6b2e9ef6dad (patch)
tree454cf065417973e9c2fcd75542351c2534b9a4b9 /arch/powerpc/kernel/head_44x.S
parent795033c344d88dc6aa5106d0cc358656f29bd722 (diff)
powerpc/47x: Base ppc476 support
This patch adds the base support for the 476 processor. The code was primarily written by Ben Herrenschmidt and Torez Smith, but I've been maintaining it for a while. The goal is to have a single binary that will run on 44x and 47x, but we still have some details to work out. The biggest is that the L1 cache line size differs on the two platforms, but it's currently a compile-time option. Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org> Signed-off-by: Torez Smith <lnxtorez@linux.vnet.ibm.com> Signed-off-by: Dave Kleikamp <shaggy@linux.vnet.ibm.com> Signed-off-by: Josh Boyer <jwboyer@linux.vnet.ibm.com>
Diffstat (limited to 'arch/powerpc/kernel/head_44x.S')
-rw-r--r--arch/powerpc/kernel/head_44x.S502
1 files changed, 486 insertions, 16 deletions
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 39be049a7850..1acd175428c4 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -37,6 +37,7 @@
37#include <asm/thread_info.h> 37#include <asm/thread_info.h>
38#include <asm/ppc_asm.h> 38#include <asm/ppc_asm.h>
39#include <asm/asm-offsets.h> 39#include <asm/asm-offsets.h>
40#include <asm/synch.h>
40#include "head_booke.h" 41#include "head_booke.h"
41 42
42 43
@@ -191,7 +192,7 @@ interrupt_base:
191#endif 192#endif
192 193
193 /* Data TLB Error Interrupt */ 194 /* Data TLB Error Interrupt */
194 START_EXCEPTION(DataTLBError) 195 START_EXCEPTION(DataTLBError44x)
195 mtspr SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */ 196 mtspr SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */
196 mtspr SPRN_SPRG_WSCRATCH1, r11 197 mtspr SPRN_SPRG_WSCRATCH1, r11
197 mtspr SPRN_SPRG_WSCRATCH2, r12 198 mtspr SPRN_SPRG_WSCRATCH2, r12
@@ -282,7 +283,7 @@ tlb_44x_patch_hwater_D:
282 mfspr r10,SPRN_DEAR 283 mfspr r10,SPRN_DEAR
283 284
284 /* Jump to common tlb load */ 285 /* Jump to common tlb load */
285 b finish_tlb_load 286 b finish_tlb_load_44x
286 287
2872: 2882:
288 /* The bailout. Restore registers to pre-exception conditions 289 /* The bailout. Restore registers to pre-exception conditions
@@ -302,7 +303,7 @@ tlb_44x_patch_hwater_D:
302 * information from different registers and bailout 303 * information from different registers and bailout
303 * to a different point. 304 * to a different point.
304 */ 305 */
305 START_EXCEPTION(InstructionTLBError) 306 START_EXCEPTION(InstructionTLBError44x)
306 mtspr SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */ 307 mtspr SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */
307 mtspr SPRN_SPRG_WSCRATCH1, r11 308 mtspr SPRN_SPRG_WSCRATCH1, r11
308 mtspr SPRN_SPRG_WSCRATCH2, r12 309 mtspr SPRN_SPRG_WSCRATCH2, r12
@@ -378,7 +379,7 @@ tlb_44x_patch_hwater_I:
378 mfspr r10,SPRN_SRR0 379 mfspr r10,SPRN_SRR0
379 380
380 /* Jump to common TLB load point */ 381 /* Jump to common TLB load point */
381 b finish_tlb_load 382 b finish_tlb_load_44x
382 383
3832: 3842:
384 /* The bailout. Restore registers to pre-exception conditions 385 /* The bailout. Restore registers to pre-exception conditions
@@ -392,15 +393,7 @@ tlb_44x_patch_hwater_I:
392 mfspr r10, SPRN_SPRG_RSCRATCH0 393 mfspr r10, SPRN_SPRG_RSCRATCH0
393 b InstructionStorage 394 b InstructionStorage
394 395
395 /* Debug Interrupt */
396 DEBUG_CRIT_EXCEPTION
397
398/*
399 * Local functions
400 */
401
402/* 396/*
403
404 * Both the instruction and data TLB miss get to this 397 * Both the instruction and data TLB miss get to this
405 * point to load the TLB. 398 * point to load the TLB.
406 * r10 - EA of fault 399 * r10 - EA of fault
@@ -410,7 +403,7 @@ tlb_44x_patch_hwater_I:
410 * MMUCR - loaded with proper value when we get here 403 * MMUCR - loaded with proper value when we get here
411 * Upon exit, we reload everything and RFI. 404 * Upon exit, we reload everything and RFI.
412 */ 405 */
413finish_tlb_load: 406finish_tlb_load_44x:
414 /* Combine RPN & ERPN and write WS 0 */ 407
415 rlwimi r11,r12,0,0,31-PAGE_SHIFT 408 rlwimi r11,r12,0,0,31-PAGE_SHIFT
416 tlbwe r11,r13,PPC44x_TLB_XLAT 409 tlbwe r11,r13,PPC44x_TLB_XLAT
@@ -443,6 +436,227 @@ finish_tlb_load:
443 mfspr r10, SPRN_SPRG_RSCRATCH0 436 mfspr r10, SPRN_SPRG_RSCRATCH0
444 rfi /* Force context change */ 437 rfi /* Force context change */
445 438
439/* TLB error interrupts for 476
440 */
441#ifdef CONFIG_PPC_47x
442 START_EXCEPTION(DataTLBError47x)
443 mtspr SPRN_SPRG_WSCRATCH0,r10 /* Save some working registers */
444 mtspr SPRN_SPRG_WSCRATCH1,r11
445 mtspr SPRN_SPRG_WSCRATCH2,r12
446 mtspr SPRN_SPRG_WSCRATCH3,r13
447 mfcr r11
448 mtspr SPRN_SPRG_WSCRATCH4,r11
449 mfspr r10,SPRN_DEAR /* Get faulting address */
450
451 /* If we are faulting a kernel address, we have to use the
452 * kernel page tables.
453 */
454 lis r11,PAGE_OFFSET@h
455 cmplw cr0,r10,r11
456 blt+ 3f
457 lis r11,swapper_pg_dir@h
458 ori r11,r11, swapper_pg_dir@l
459 li r12,0 /* MMUCR = 0 */
460 b 4f
461
462 /* Get the PGD for the current thread and setup MMUCR */
4633: mfspr r11,SPRN_SPRG3
464 lwz r11,PGDIR(r11)
465 mfspr r12,SPRN_PID /* Get PID */
4664: mtspr SPRN_MMUCR,r12 /* Set MMUCR */
467
468 /* Mask of required permission bits. Note that while we
469 * do copy ESR:ST to _PAGE_RW position as trying to write
470 * to an RO page is pretty common, we don't do it with
471 * _PAGE_DIRTY. We could do it, but it's a fairly rare
472 * event so I'd rather take the overhead when it happens
473 * rather than adding an instruction here. We should measure
474 * whether the whole thing is worth it in the first place
475 * as we could avoid loading SPRN_ESR completely in the first
476 * place...
477 *
478 * TODO: Is it worth doing that mfspr & rlwimi in the first
479 * place or can we save a couple of instructions here ?
480 */
481 mfspr r12,SPRN_ESR
482 li r13,_PAGE_PRESENT|_PAGE_ACCESSED
483 rlwimi r13,r12,10,30,30
484
485 /* Load the PTE */
486 /* Compute pgdir/pmd offset */
487 rlwinm r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
488 lwzx r11,r12,r11 /* Get pgd/pmd entry */
489
490 /* Word 0 is EPN,V,TS,DSIZ */
491 li r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
492 rlwimi r10,r12,0,32-PAGE_SHIFT,31 /* Insert valid and page size*/
493 li r12,0
494 tlbwe r10,r12,0
495
496 /* XXX can we do better ? Need to make sure tlbwe has established
497 * latch V bit in MMUCR0 before the PTE is loaded further down */
498#ifdef CONFIG_SMP
499 isync
500#endif
501
502 rlwinm. r12,r11,0,0,20 /* Extract pt base address */
503 /* Compute pte address */
504 rlwimi r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
505 beq 2f /* Bail if no table */
506 lwz r11,0(r12) /* Get high word of pte entry */
507
508 /* XXX can we do better ? maybe insert a known 0 bit from r11 into the
509 * bottom of r12 to create a data dependency... We can also use r10
510 * as destination nowadays
511 */
512#ifdef CONFIG_SMP
513 lwsync
514#endif
515 lwz r12,4(r12) /* Get low word of pte entry */
516
517 andc. r13,r13,r12 /* Check permission */
518
519 /* Jump to common tlb load */
520 beq finish_tlb_load_47x
521
5222: /* The bailout. Restore registers to pre-exception conditions
523 * and call the heavyweights to help us out.
524 */
525 mfspr r11,SPRN_SPRG_RSCRATCH4
526 mtcr r11
527 mfspr r13,SPRN_SPRG_RSCRATCH3
528 mfspr r12,SPRN_SPRG_RSCRATCH2
529 mfspr r11,SPRN_SPRG_RSCRATCH1
530 mfspr r10,SPRN_SPRG_RSCRATCH0
531 b DataStorage
532
533 /* Instruction TLB Error Interrupt */
534 /*
535 * Nearly the same as above, except we get our
536 * information from different registers and bailout
537 * to a different point.
538 */
539 START_EXCEPTION(InstructionTLBError47x)
540 mtspr SPRN_SPRG_WSCRATCH0,r10 /* Save some working registers */
541 mtspr SPRN_SPRG_WSCRATCH1,r11
542 mtspr SPRN_SPRG_WSCRATCH2,r12
543 mtspr SPRN_SPRG_WSCRATCH3,r13
544 mfcr r11
545 mtspr SPRN_SPRG_WSCRATCH4,r11
546 mfspr r10,SPRN_SRR0 /* Get faulting address */
547
548 /* If we are faulting a kernel address, we have to use the
549 * kernel page tables.
550 */
551 lis r11,PAGE_OFFSET@h
552 cmplw cr0,r10,r11
553 blt+ 3f
554 lis r11,swapper_pg_dir@h
555 ori r11,r11, swapper_pg_dir@l
556 li r12,0 /* MMUCR = 0 */
557 b 4f
558
559 /* Get the PGD for the current thread and setup MMUCR */
5603: mfspr r11,SPRN_SPRG_THREAD
561 lwz r11,PGDIR(r11)
562 mfspr r12,SPRN_PID /* Get PID */
5634: mtspr SPRN_MMUCR,r12 /* Set MMUCR */
564
565 /* Make up the required permissions */
566 li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
567
568 /* Load PTE */
569 /* Compute pgdir/pmd offset */
570 rlwinm r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
571 lwzx r11,r12,r11 /* Get pgd/pmd entry */
572
573 /* Word 0 is EPN,V,TS,DSIZ */
574 li r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
575 rlwimi r10,r12,0,32-PAGE_SHIFT,31 /* Insert valid and page size*/
576 li r12,0
577 tlbwe r10,r12,0
578
579 /* XXX can we do better ? Need to make sure tlbwe has established
580 * latch V bit in MMUCR0 before the PTE is loaded further down */
581#ifdef CONFIG_SMP
582 isync
583#endif
584
585 rlwinm. r12,r11,0,0,20 /* Extract pt base address */
586 /* Compute pte address */
587 rlwimi r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
588 beq 2f /* Bail if no table */
589
590 lwz r11,0(r12) /* Get high word of pte entry */
591 /* XXX can we do better ? maybe insert a known 0 bit from r11 into the
592 * bottom of r12 to create a data dependency... We can also use r10
593 * as destination nowadays
594 */
595#ifdef CONFIG_SMP
596 lwsync
597#endif
598 lwz r12,4(r12) /* Get low word of pte entry */
599
600 andc. r13,r13,r12 /* Check permission */
601
602 /* Jump to common TLB load point */
603 beq finish_tlb_load_47x
604
6052: /* The bailout. Restore registers to pre-exception conditions
606 * and call the heavyweights to help us out.
607 */
608 mfspr r11, SPRN_SPRG_RSCRATCH4
609 mtcr r11
610 mfspr r13, SPRN_SPRG_RSCRATCH3
611 mfspr r12, SPRN_SPRG_RSCRATCH2
612 mfspr r11, SPRN_SPRG_RSCRATCH1
613 mfspr r10, SPRN_SPRG_RSCRATCH0
614 b InstructionStorage
615
616/*
617 * Both the instruction and data TLB miss get to this
618 * point to load the TLB.
619 * r10 - free to use
620 * r11 - PTE high word value
621 * r12 - PTE low word value
622 * r13 - free to use
623 * MMUCR - loaded with proper value when we get here
624 * Upon exit, we reload everything and RFI.
625 */
626finish_tlb_load_47x:
627 /* Combine RPN & ERPN and write WS 1 */
628 rlwimi r11,r12,0,0,31-PAGE_SHIFT
629 tlbwe r11,r13,1
630
631 /* And make up word 2 */
632 li r10,0xf85 /* Mask to apply from PTE */
633 rlwimi r10,r12,29,30,30 /* DIRTY -> SW position */
634 and r11,r12,r10 /* Mask PTE bits to keep */
635 andi. r10,r12,_PAGE_USER /* User page ? */
636 beq 1f /* nope, leave U bits empty */
637 rlwimi r11,r11,3,26,28 /* yes, copy S bits to U */
6381: tlbwe r11,r13,2
639
640 /* Done...restore registers and get out of here.
641 */
642 mfspr r11, SPRN_SPRG_RSCRATCH4
643 mtcr r11
644 mfspr r13, SPRN_SPRG_RSCRATCH3
645 mfspr r12, SPRN_SPRG_RSCRATCH2
646 mfspr r11, SPRN_SPRG_RSCRATCH1
647 mfspr r10, SPRN_SPRG_RSCRATCH0
648 rfi
649
650#endif /* CONFIG_PPC_47x */
651
652 /* Debug Interrupt */
653 /*
654 * This statement needs to exist at the end of the IVPR
655 * definition just in case you end up taking a debug
656 * exception within another exception.
657 */
658 DEBUG_CRIT_EXCEPTION
659
446/* 660/*
447 * Global functions 661 * Global functions
448 */ 662 */
@@ -491,9 +705,18 @@ _GLOBAL(set_context)
491/* 705/*
492 * Init CPU state. This is called at boot time or for secondary CPUs 706 * Init CPU state. This is called at boot time or for secondary CPUs
493 * to setup initial TLB entries, setup IVORs, etc... 707 * to setup initial TLB entries, setup IVORs, etc...
708 *
494 */ 709 */
495_GLOBAL(init_cpu_state) 710_GLOBAL(init_cpu_state)
496 mflr r22 711 mflr r22
712#ifdef CONFIG_PPC_47x
713 /* We use the PVR to differentiate 44x cores from 476 */
714 mfspr r3,SPRN_PVR
715 srwi r3,r3,16
716 cmplwi cr0,r3,PVR_476@h
717 beq head_start_47x
718#endif /* CONFIG_PPC_47x */
719
497/* 720/*
498 * In case the firmware didn't do it, we apply some workarounds 721 * In case the firmware didn't do it, we apply some workarounds
499 * that are good for all 440 core variants here 722 * that are good for all 440 core variants here
@@ -506,7 +729,7 @@ _GLOBAL(init_cpu_state)
506 sync 729 sync
507 730
508/* 731/*
509 * Set up the initial MMU state 732 * Set up the initial MMU state for 44x
510 * 733 *
511 * We are still executing code at the virtual address 734 * We are still executing code at the virtual address
512 * mappings set by the firmware for the base of RAM. 735 * mappings set by the firmware for the base of RAM.
@@ -646,16 +869,257 @@ skpinv: addi r4,r4,1 /* Increment */
646 SET_IVOR(10, Decrementer); 869 SET_IVOR(10, Decrementer);
647 SET_IVOR(11, FixedIntervalTimer); 870 SET_IVOR(11, FixedIntervalTimer);
648 SET_IVOR(12, WatchdogTimer); 871 SET_IVOR(12, WatchdogTimer);
649 SET_IVOR(13, DataTLBError); 872 SET_IVOR(13, DataTLBError44x);
650 SET_IVOR(14, InstructionTLBError); 873 SET_IVOR(14, InstructionTLBError44x);
651 SET_IVOR(15, DebugCrit); 874 SET_IVOR(15, DebugCrit);
652 875
876 b head_start_common
877
878
879#ifdef CONFIG_PPC_47x
880
881#ifdef CONFIG_SMP
882
883/* Entry point for secondary 47x processors */
884_GLOBAL(start_secondary_47x)
885 mr r24,r3 /* CPU number */
886
887 bl init_cpu_state
888
889 /* Now we need to bolt the rest of kernel memory which
890 * is done in C code. We must be careful because our task
891 * struct or our stack can (and will probably) be out
892 * of reach of the initial 256M TLB entry, so we use a
893 * small temporary stack in .bss for that. This works
894 * because only one CPU at a time can be in this code
895 */
896 lis r1,temp_boot_stack@h
897 ori r1,r1,temp_boot_stack@l
898 addi r1,r1,1024-STACK_FRAME_OVERHEAD
899 li r0,0
900 stw r0,0(r1)
901 bl mmu_init_secondary
902
903 /* Now we can get our task struct and real stack pointer */
904
905 /* Get current_thread_info and current */
906 lis r1,secondary_ti@ha
907 lwz r1,secondary_ti@l(r1)
908 lwz r2,TI_TASK(r1)
909
910 /* Current stack pointer */
911 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
912 li r0,0
913 stw r0,0(r1)
914
915 /* Kernel stack for exception entry in SPRG3 */
916 addi r4,r2,THREAD /* init task's THREAD */
917 mtspr SPRN_SPRG3,r4
918
919 b start_secondary
920
921#endif /* CONFIG_SMP */
922
923/*
924 * Set up the initial MMU state for 47x
925 *
926 * We are still executing code at the virtual address
927 * mappings set by the firmware for the base of RAM.
928 */
929
930head_start_47x:
931 /* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
932 mfspr r3,SPRN_PID /* Get PID */
933 mfmsr r4 /* Get MSR */
934 andi. r4,r4,MSR_IS@l /* TS=1? */
935 beq 1f /* If not, leave STS=0 */
936 oris r3,r3,PPC47x_MMUCR_STS@h /* Set STS=1 */
9371: mtspr SPRN_MMUCR,r3 /* Put MMUCR */
938 sync
939
940 /* Find the entry we are running from */
941 bl 1f
9421: mflr r23
943 tlbsx r23,0,r23
944 tlbre r24,r23,0
945 tlbre r25,r23,1
946 tlbre r26,r23,2
947
948/*
949 * Cleanup time
950 */
951
952 /* Initialize MMUCR */
953 li r5,0
954 mtspr SPRN_MMUCR,r5
955 sync
956
957clear_all_utlb_entries:
958
959 #; Set initial values.
960
961 addis r3,0,0x8000
962 addi r4,0,0
963 addi r5,0,0
964 b clear_utlb_entry
965
966 #; Align the loop to speed things up.
967
968 .align 6
969
970clear_utlb_entry:
971
972 tlbwe r4,r3,0
973 tlbwe r5,r3,1
974 tlbwe r5,r3,2
975 addis r3,r3,0x2000
976 cmpwi r3,0
977 bne clear_utlb_entry
978 addis r3,0,0x8000
979 addis r4,r4,0x100
980 cmpwi r4,0
981 bne clear_utlb_entry
982
983 #; Restore original entry.
984
985 oris r23,r23,0x8000 /* specify the way */
986 tlbwe r24,r23,0
987 tlbwe r25,r23,1
988 tlbwe r26,r23,2
989
990/*
991 * Configure and load pinned entry into TLB for the kernel core
992 */
993
994 lis r3,PAGE_OFFSET@h
995 ori r3,r3,PAGE_OFFSET@l
996
997 /* Kernel is at the base of RAM */
998 li r4, 0 /* Load the kernel physical address */
999
1000 /* Load the kernel PID = 0 */
1001 li r0,0
1002 mtspr SPRN_PID,r0
1003 sync
1004
1005 /* Word 0 */
1006 clrrwi r3,r3,12 /* Mask off the effective page number */
1007 ori r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_256M
1008
1009 /* Word 1 */
1010 clrrwi r4,r4,12 /* Mask off the real page number */
1011 /* ERPN is 0 for first 4GB page */
1012 /* Word 2 */
1013 li r5,0
1014 ori r5,r5,PPC47x_TLB2_S_RWX
1015#ifdef CONFIG_SMP
1016 ori r5,r5,PPC47x_TLB2_M
1017#endif
1018
1019 /* We write to way 0 and bolted 0 */
1020 lis r0,0x8800
1021 tlbwe r3,r0,0
1022 tlbwe r4,r0,1
1023 tlbwe r5,r0,2
1024
1025/*
1026 * Configure SSPCR, ISPCR and USPCR for now to search everything, we can fix
1027 * them up later
1028 */
1029 LOAD_REG_IMMEDIATE(r3, 0x9abcdef0)
1030 mtspr SPRN_SSPCR,r3
1031 mtspr SPRN_USPCR,r3
1032 LOAD_REG_IMMEDIATE(r3, 0x12345670)
1033 mtspr SPRN_ISPCR,r3
1034
1035 /* Force context change */
1036 mfmsr r0
1037 mtspr SPRN_SRR1, r0
1038 lis r0,3f@h
1039 ori r0,r0,3f@l
1040 mtspr SPRN_SRR0,r0
1041 sync
1042 rfi
1043
1044 /* Invalidate original entry we used */
10453:
1046 rlwinm r24,r24,0,21,19 /* clear the "valid" bit */
1047 tlbwe r24,r23,0
1048 addi r24,0,0
1049 tlbwe r24,r23,1
1050 tlbwe r24,r23,2
1051 isync /* Clear out the shadow TLB entries */
1052
1053#ifdef CONFIG_PPC_EARLY_DEBUG_44x
1054 /* Add UART mapping for early debug. */
1055
1056 /* Word 0 */
1057 lis r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
1058 ori r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_TS | PPC47x_TLB0_1M
1059
1060 /* Word 1 */
1061 lis r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
1062 ori r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH
1063
1064 /* Word 2 */
1065 li r5,(PPC47x_TLB2_S_RW | PPC47x_TLB2_IMG)
1066
1067 /* Bolted in way 0, bolt slot 5, we -hope- we don't hit the same
1068 * congruence class as the kernel, we need to make sure of it at
1069 * some point
1070 */
1071 lis r0,0x8d00
1072 tlbwe r3,r0,0
1073 tlbwe r4,r0,1
1074 tlbwe r5,r0,2
1075
1076 /* Force context change */
1077 isync
1078#endif /* CONFIG_PPC_EARLY_DEBUG_44x */
1079
1080 /* Establish the interrupt vector offsets */
1081 SET_IVOR(0, CriticalInput);
1082 SET_IVOR(1, MachineCheckA);
1083 SET_IVOR(2, DataStorage);
1084 SET_IVOR(3, InstructionStorage);
1085 SET_IVOR(4, ExternalInput);
1086 SET_IVOR(5, Alignment);
1087 SET_IVOR(6, Program);
1088 SET_IVOR(7, FloatingPointUnavailable);
1089 SET_IVOR(8, SystemCall);
1090 SET_IVOR(9, AuxillaryProcessorUnavailable);
1091 SET_IVOR(10, Decrementer);
1092 SET_IVOR(11, FixedIntervalTimer);
1093 SET_IVOR(12, WatchdogTimer);
1094 SET_IVOR(13, DataTLBError47x);
1095 SET_IVOR(14, InstructionTLBError47x);
1096 SET_IVOR(15, DebugCrit);
1097
1098 /* We configure icbi to invalidate 128 bytes at a time since the
1099 * current 32-bit kernel code isn't too happy with icache != dcache
1100 * block size
1101 */
1102 mfspr r3,SPRN_CCR0
1103 oris r3,r3,0x0020
1104 mtspr SPRN_CCR0,r3
1105 isync
1106
1107#endif /* CONFIG_PPC_47x */
1108
1109/*
1110 * Here we are back to code that is common between 44x and 47x
1111 *
1112 * We proceed to further kernel initialization and return to the
1113 * main kernel entry
1114 */
1115head_start_common:
653 /* Establish the interrupt vector base */ 1116 /* Establish the interrupt vector base */
654 lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */ 1117 lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */
655 mtspr SPRN_IVPR,r4 1118 mtspr SPRN_IVPR,r4
656 1119
657 addis r22,r22,KERNELBASE@h 1120 addis r22,r22,KERNELBASE@h
658 mtlr r22 1121 mtlr r22
1122 isync
659 blr 1123 blr
660 1124
661/* 1125/*
@@ -683,3 +1147,9 @@ swapper_pg_dir:
683 */ 1147 */
684abatron_pteptrs: 1148abatron_pteptrs:
685 .space 8 1149 .space 8
1150
1151#ifdef CONFIG_SMP
1152 .align 12
1153temp_boot_stack:
1154 .space 1024
1155#endif /* CONFIG_SMP */