-rw-r--r-- | arch/powerpc/include/asm/exception-64s.h | 15
-rw-r--r-- | arch/powerpc/kernel/entry_64.S | 42
-rw-r--r-- | arch/powerpc/kernel/exceptions-64s.S | 95
-rw-r--r-- | arch/powerpc/kernel/head_64.S | 44
-rw-r--r-- | arch/powerpc/kernel/misc.S | 1
-rw-r--r-- | arch/powerpc/kernel/vmlinux.lds.S | 5
-rw-r--r-- | arch/powerpc/mm/slb_low.S | 16 |
7 files changed, 15 insertions, 203 deletions
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 8057f4f6980f..cc2bcf464746 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -272,26 +272,11 @@ label##_hv: \
 	_MASKABLE_EXCEPTION_PSERIES(vec, label, \
 		EXC_HV, SOFTEN_TEST_HV)
 
-#ifdef CONFIG_PPC_ISERIES
-#define DISABLE_INTS \
-	li	r11,0; \
-	stb	r11,PACASOFTIRQEN(r13); \
-BEGIN_FW_FTR_SECTION; \
-	stb	r11,PACAHARDIRQEN(r13); \
-END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES); \
-	TRACE_DISABLE_INTS; \
-BEGIN_FW_FTR_SECTION; \
-	mfmsr	r10; \
-	ori	r10,r10,MSR_EE; \
-	mtmsrd	r10,1; \
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#else
 #define DISABLE_INTS \
 	li	r11,0; \
 	stb	r11,PACASOFTIRQEN(r13); \
 	stb	r11,PACAHARDIRQEN(r13); \
 	TRACE_DISABLE_INTS
-#endif /* CONFIG_PPC_ISERIES */
 
 #define ENABLE_INTS \
 	ld	r12,_MSR(r1); \
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 866462cbe2d8..0c3764ba8d49 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -127,17 +127,6 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
 	stb	r10,PACASOFTIRQEN(r13)
 	stb	r10,PACAHARDIRQEN(r13)
 	std	r10,SOFTE(r1)
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
-	/* Hack for handling interrupts when soft-enabling on iSeries */
-	cmpdi	cr1,r0,0x5555		/* syscall 0x5555 */
-	andi.	r10,r12,MSR_PR		/* from kernel */
-	crand	4*cr0+eq,4*cr1+eq,4*cr0+eq
-	bne	2f
-	b	hardware_interrupt_entry
-2:
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif /* CONFIG_PPC_ISERIES */
 
 	/* Hard enable interrupts */
 #ifdef CONFIG_PPC_BOOK3E
@@ -591,15 +580,10 @@ _GLOBAL(ret_from_except_lite)
 	ld	r4,TI_FLAGS(r9)
 	andi.	r0,r4,_TIF_USER_WORK_MASK
 	bne	do_work
-#endif
+#endif /* !CONFIG_PREEMPT */
 
 restore:
-BEGIN_FW_FTR_SECTION
 	ld	r5,SOFTE(r1)
-FW_FTR_SECTION_ELSE
-	b	.Liseries_check_pending_irqs
-ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
-2:
 	TRACE_AND_RESTORE_IRQ(r5);
 
 	/* extract EE bit and use it to restore paca->hard_enabled */
@@ -669,30 +653,6 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
 
 #endif /* CONFIG_PPC_BOOK3E */
 
-.Liseries_check_pending_irqs:
-#ifdef CONFIG_PPC_ISERIES
-	ld	r5,SOFTE(r1)
-	cmpdi	0,r5,0
-	beq	2b
-	/* Check for pending interrupts (iSeries) */
-	ld	r3,PACALPPACAPTR(r13)
-	ld	r3,LPPACAANYINT(r3)
-	cmpdi	r3,0
-	beq+	2b		/* skip do_IRQ if no interrupts */
-
-	li	r3,0
-	stb	r3,PACASOFTIRQEN(r13)	/* ensure we are soft-disabled */
-#ifdef CONFIG_TRACE_IRQFLAGS
-	bl	.trace_hardirqs_off
-	mfmsr	r10
-#endif
-	ori	r10,r10,MSR_EE
-	mtmsrd	r10		/* hard-enable again */
-	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.do_IRQ
-	b	.ret_from_except_lite	/* loop back and handle more */
-#endif
-
 do_work:
 #ifdef CONFIG_PREEMPT
 	andi.	r0,r3,MSR_PR	/* Returning to user mode? */
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 15c5a4f6de01..fea8a69df4b2 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -19,7 +19,7 @@
  * We layout physical memory as follows:
  * 0x0000 - 0x00ff : Secondary processor spin code
  * 0x0100 - 0x2fff : pSeries Interrupt prologs
- * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
+ * 0x3000 - 0x5fff : interrupt support common interrupt prologs
  * 0x6000 - 0x6fff : Initial (CPU0) segment table
  * 0x7000 - 0x7fff : FWNMI data area
  * 0x8000 -        : Early init and support code
@@ -458,6 +458,7 @@ machine_check_common:
 	bl	.machine_check_exception
 	b	.ret_from_except
 
+	STD_EXCEPTION_COMMON_LITE(0x500, hardware_interrupt, do_IRQ)
 	STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
 	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
 	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
@@ -672,12 +673,6 @@ _GLOBAL(slb_miss_realmode)
 	ld	r10,PACA_EXSLB+EX_LR(r13)
 	ld	r3,PACA_EXSLB+EX_R3(r13)
 	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
-	ld	r11,PACALPPACAPTR(r13)
-	ld	r11,LPPACASRR0(r11)		/* get SRR0 value */
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif /* CONFIG_PPC_ISERIES */
 
 	mtlr	r10
 
@@ -690,12 +685,6 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
 	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
 .machine	pop
 
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
-	mtspr	SPRN_SRR0,r11
-	mtspr	SPRN_SRR1,r12
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif /* CONFIG_PPC_ISERIES */
 	ld	r9,PACA_EXSLB+EX_R9(r13)
 	ld	r10,PACA_EXSLB+EX_R10(r13)
 	ld	r11,PACA_EXSLB+EX_R11(r13)
@@ -704,13 +693,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
 	rfid
 	b	.	/* prevent speculative execution */
 
-2:
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
-	b	unrecov_slb
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif /* CONFIG_PPC_ISERIES */
-	mfspr	r11,SPRN_SRR0
+2:	mfspr	r11,SPRN_SRR0
 	ld	r10,PACAKBASE(r13)
 	LOAD_HANDLER(r10,unrecov_slb)
 	mtspr	SPRN_SRR0,r10
@@ -727,20 +710,6 @@ unrecov_slb:
 	bl	.unrecoverable_exception
 	b	1b
 
-	.align	7
-	.globl hardware_interrupt_common
-	.globl hardware_interrupt_entry
-hardware_interrupt_common:
-	EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
-	FINISH_NAP
-hardware_interrupt_entry:
-	DISABLE_INTS
-BEGIN_FTR_SECTION
-	bl	.ppc64_runlatch_on
-END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
-	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.do_IRQ
-	b	.ret_from_except_lite
 
 #ifdef CONFIG_PPC_970_NAP
 power4_fixup_nap:
@@ -913,11 +882,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
 	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
 	bne	77f			/* then don't call hash_page now */
 
-	/*
-	 * On iSeries, we soft-disable interrupts here, then
-	 * hard-enable interrupts so that the hash_page code can spin on
-	 * the hash_table_lock without problems on a shared processor.
-	 */
+	/* We run with interrupts both soft and hard disabled */
 	DISABLE_INTS
 
 	/*
@@ -956,25 +921,11 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
 	bl	.hash_page		/* build HPTE if possible */
 	cmpdi	r3,0			/* see if hash_page succeeded */
 
-BEGIN_FW_FTR_SECTION
-	/*
-	 * If we had interrupts soft-enabled at the point where the
-	 * DSI/ISI occurred, and an interrupt came in during hash_page,
-	 * handle it now.
-	 * We jump to ret_from_except_lite rather than fast_exception_return
-	 * because ret_from_except_lite will check for and handle pending
-	 * interrupts if necessary.
-	 */
-	beq	13f
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-
-BEGIN_FW_FTR_SECTION
 	/*
 	 * Here we have interrupts hard-disabled, so it is sufficient
 	 * to restore paca->{soft,hard}_enable and get out.
 	 */
 	beq	fast_exc_return_irq	/* Return from exception on success */
-END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
 
 	/* For a hash failure, we don't bother re-enabling interrupts */
 	ble-	12f
@@ -1141,51 +1092,19 @@ _GLOBAL(do_stab_bolted)
 	.= 0x7000
 	.globl fwnmi_data_area
 fwnmi_data_area:
-#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
-
-/* iSeries does not use the FWNMI stuff, so it is safe to put
- * this here, even if we later allow kernels that will boot on
- * both pSeries and iSeries */
-#ifdef CONFIG_PPC_ISERIES
-	. = LPARMAP_PHYS
-	.globl xLparMap
-xLparMap:
-	.quad	HvEsidsToMap		/* xNumberEsids */
-	.quad	HvRangesToMap		/* xNumberRanges */
-	.quad	STAB0_PAGE		/* xSegmentTableOffs */
-	.zero	40			/* xRsvd */
-	/* xEsids (HvEsidsToMap entries of 2 quads) */
-	.quad	PAGE_OFFSET_ESID	/* xKernelEsid */
-	.quad	PAGE_OFFSET_VSID	/* xKernelVsid */
-	.quad	VMALLOC_START_ESID	/* xKernelEsid */
-	.quad	VMALLOC_START_VSID	/* xKernelVsid */
-	/* xRanges (HvRangesToMap entries of 3 quads) */
-	.quad	HvPagesToMap		/* xPages */
-	.quad	0			/* xOffset */
-	.quad	PAGE_OFFSET_VSID << (SID_SHIFT - HW_PAGE_SHIFT)	/* xVPN */
-
-#endif /* CONFIG_PPC_ISERIES */
 
-#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
 /* pseries and powernv need to keep the whole page from
  * 0x7000 to 0x8000 free for use by the firmware
  */
 . = 0x8000
 #endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
 
-/*
- * Space for CPU0's segment table.
- *
- * On iSeries, the hypervisor must fill in at least one entry before
- * we get control (with relocate on). The address is given to the hv
- * as a page number (see xLparMap above), so this must be at a
- * fixed address (the linker can't compute (u64)&initial_stab >>
- * PAGE_SHIFT).
- */
-	. = STAB0_OFFSET	/* 0x8000 */
+/* Space for CPU0's segment table */
+	.balign	4096
 	.globl initial_stab
 initial_stab:
 	.space	4096
+
 #ifdef CONFIG_PPC_POWERNV
 _GLOBAL(opal_mc_secondary_handler)
 	HMT_MEDIUM
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 06c7251c1bf7..40759fbfb171 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -32,7 +32,6 @@
 #include <asm/cputable.h>
 #include <asm/setup.h>
 #include <asm/hvcall.h>
-#include <asm/iseries/lpar_map.h>
 #include <asm/thread_info.h>
 #include <asm/firmware.h>
 #include <asm/page_64.h>
@@ -57,10 +56,6 @@
  *     entry in r9 for debugging purposes
  * 2. Secondary processors enter at 0x60 with PIR in gpr3
  *
- * For iSeries:
- *  1. The MMU is on (as it always is for iSeries)
- *  2. The kernel is entered at system_reset_iSeries
- *
  * For Book3E processors:
  *  1. The MMU is on running in AS0 in a state defined in ePAPR
  *  2. The kernel is entered at __start
@@ -93,15 +88,6 @@ __secondary_hold_spinloop:
 __secondary_hold_acknowledge:
 	.llong	0x0
 
-#ifdef CONFIG_PPC_ISERIES
-	/*
-	 * At offset 0x20, there is a pointer to iSeries LPAR data.
-	 * This is required by the hypervisor
-	 */
-	. = 0x20
-	.llong hvReleaseData-KERNELBASE
-#endif /* CONFIG_PPC_ISERIES */
-
 #ifdef CONFIG_RELOCATABLE
 	/* This flag is set to 1 by a loader if the kernel should run
 	 * at the loaded address instead of the linked address. This
@@ -582,7 +568,7 @@ _GLOBAL(pmac_secondary_start)
  *   1. Processor number
  *   2. Segment table pointer (virtual address)
  * On entry the following are set:
- *   r1 = stack pointer. vaddr for iSeries, raddr (temp stack) for pSeries
+ *   r1 = stack pointer (real addr of temp stack)
  *   r24 = cpu# (in Linux terms)
  *   r13 = paca virtual address
  *   SPRG_PACA = paca virtual address
@@ -595,7 +581,7 @@ __secondary_start:
 	/* Set thread priority to MEDIUM */
 	HMT_MEDIUM
 
-	/* Initialize the kernel stack. Just a repeat for iSeries. */
+	/* Initialize the kernel stack */
 	LOAD_REG_ADDR(r3, current_set)
 	sldi	r28,r24,3		/* get current_set[cpu#] */
 	ldx	r14,r3,r28
@@ -615,20 +601,13 @@ __secondary_start:
 	li	r7,0
 	mtlr	r7
 
+	/* Mark interrupts both hard and soft disabled */
+	stb	r7,PACAHARDIRQEN(r13)
+	stb	r7,PACASOFTIRQEN(r13)
+
 	/* enable MMU and jump to start_secondary */
 	LOAD_REG_ADDR(r3, .start_secondary_prolog)
 	LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
-	ori	r4,r4,MSR_EE
-	li	r8,1
-	stb	r8,PACAHARDIRQEN(r13)
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif
-BEGIN_FW_FTR_SECTION
-	stb	r7,PACAHARDIRQEN(r13)
-END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
-	stb	r7,PACASOFTIRQEN(r13)
 
 	mtspr	SPRN_SRR0,r3
 	mtspr	SPRN_SRR1,r4
@@ -774,17 +753,8 @@ _INIT_GLOBAL(start_here_common)
 	bl	.setup_system
 
 	/* Load up the kernel context */
-5:
-	li	r5,0
+5:	li	r5,0
 	stb	r5,PACASOFTIRQEN(r13)	/* Soft Disabled */
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
-	mfmsr	r5
-	ori	r5,r5,MSR_EE		/* Hard Enabled on iSeries*/
-	mtmsrd	r5
-	li	r5,1
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif
 	stb	r5,PACAHARDIRQEN(r13)	/* Hard Disabled on others */
 
 	bl	.start_kernel
diff --git a/arch/powerpc/kernel/misc.S b/arch/powerpc/kernel/misc.S
index b69463ec2010..ba16874fe294 100644
--- a/arch/powerpc/kernel/misc.S
+++ b/arch/powerpc/kernel/misc.S
@@ -5,7 +5,6 @@
  * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
  * and Paul Mackerras.
  *
- * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
  * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
  *
  * setjmp/longjmp code by Paul Mackerras.
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 710a54005dfb..65d1c08cf09e 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -109,11 +109,6 @@ SECTIONS
 		__ptov_table_begin = .;
 		*(.ptov_fixup);
 		__ptov_table_end = .;
-#ifdef CONFIG_PPC_ISERIES
-		__dt_strings_start = .;
-		*(.dt_strings);
-		__dt_strings_end = .;
-#endif
 	}
 
 	.init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index ef653dc95b65..b9ee79ce2200 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -217,21 +217,6 @@ slb_finish_load:
 	 * free slot first but that took too long. Unfortunately we
 	 * dont have any LRU information to help us choose a slot.
 	 */
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
-	/*
-	 * On iSeries, the "bolted" stack segment can be cast out on
-	 * shared processor switch so we need to check for a miss on
-	 * it and restore it to the right slot.
-	 */
-	ld	r9,PACAKSAVE(r13)
-	clrrdi	r9,r9,28
-	clrrdi	r3,r3,28
-	li	r10,SLB_NUM_BOLTED-1	/* Stack goes in last bolted slot */
-	cmpld	r9,r3
-	beq	3f
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif /* CONFIG_PPC_ISERIES */
 
 7:	ld	r10,PACASTABRR(r13)
 	addi	r10,r10,1
@@ -282,7 +267,6 @@ _GLOBAL(slb_compare_rr_to_size)
 
 /*
  * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
- * We assume legacy iSeries will never have 1T segments.
  *
  * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9
  */