Diffstat (limited to 'arch/ppc64/kernel/head.S')
-rw-r--r--  arch/ppc64/kernel/head.S | 523
1 file changed, 206 insertions(+), 317 deletions(-)
diff --git a/arch/ppc64/kernel/head.S b/arch/ppc64/kernel/head.S
index accaa052d31f..036959775623 100644
--- a/arch/ppc64/kernel/head.S
+++ b/arch/ppc64/kernel/head.S
@@ -23,14 +23,11 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#define SECONDARY_PROCESSORS
-
 #include <linux/config.h>
 #include <linux/threads.h>
 #include <asm/processor.h>
 #include <asm/page.h>
 #include <asm/mmu.h>
-#include <asm/naca.h>
 #include <asm/systemcfg.h>
 #include <asm/ppc_asm.h>
 #include <asm/offsets.h>
@@ -45,18 +42,13 @@
 #endif
 
 /*
- * hcall interface to pSeries LPAR
- */
-#define H_SET_ASR 0x30
-
-/*
  * We layout physical memory as follows:
  * 0x0000 - 0x00ff : Secondary processor spin code
  * 0x0100 - 0x2fff : pSeries Interrupt prologs
- * 0x3000 - 0x3fff : Interrupt support
- * 0x4000 - 0x4fff : NACA
- * 0x6000 : iSeries and common interrupt prologs
- * 0x9000 - 0x9fff : Initial segment table
+ * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
+ * 0x6000 - 0x6fff : Initial (CPU0) segment table
+ * 0x7000 - 0x7fff : FWNMI data area
+ * 0x8000 - : Early init and support code
  */
 
 /*
@@ -94,6 +86,7 @@ END_FTR_SECTION(0, 1)
 
 /* Catch branch to 0 in real mode */
 trap
+
 #ifdef CONFIG_PPC_ISERIES
 /*
  * At offset 0x20, there is a pointer to iSeries LPAR data.
@@ -103,12 +96,12 @@ END_FTR_SECTION(0, 1)
 .llong hvReleaseData-KERNELBASE
 
 /*
- * At offset 0x28 and 0x30 are offsets to the msChunks
+ * At offset 0x28 and 0x30 are offsets to the mschunks_map
  * array (used by the iSeries LPAR debugger to do translation
  * between physical addresses and absolute addresses) and
  * to the pidhash table (also used by the debugger)
  */
-.llong msChunks-KERNELBASE
+.llong mschunks_map-KERNELBASE
 .llong 0 /* pidhash-KERNELBASE SFRXXX */
 
 /* Offset 0x38 - Pointer to start of embedded System.map */
@@ -120,7 +113,7 @@ embedded_sysmap_start:
 embedded_sysmap_end:
 .llong 0
 
-#else /* CONFIG_PPC_ISERIES */
+#endif /* CONFIG_PPC_ISERIES */
 
 /* Secondary processors spin on this value until it goes to 1. */
 .globl __secondary_hold_spinloop
@@ -155,7 +148,7 @@ _GLOBAL(__secondary_hold)
 std r24,__secondary_hold_acknowledge@l(0)
 sync
 
-/* All secondary cpu's wait here until told to start. */
+/* All secondary cpus wait here until told to start. */
 100: ld r4,__secondary_hold_spinloop@l(0)
 cmpdi 0,r4,1
 bne 100b
@@ -170,7 +163,6 @@ _GLOBAL(__secondary_hold)
 BUG_OPCODE
 #endif
 #endif
-#endif
 
 /* This value is used to mark exception frames on the stack. */
 .section ".toc","aw"
@@ -502,33 +494,37 @@ system_call_pSeries:
 STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
 STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
 
+. = 0x3000
+
+/*** pSeries interrupt support ***/
+
 /* moved from 0xf00 */
-STD_EXCEPTION_PSERIES(0x3000, performance_monitor)
+STD_EXCEPTION_PSERIES(., performance_monitor)
 
-. = 0x3100
+.align 7
 _GLOBAL(do_stab_bolted_pSeries)
 mtcrf 0x80,r12
 mfspr r12,SPRG2
 EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
 
-
-/* Space for the naca. Architected to be located at real address
- * NACA_PHYS_ADDR. Various tools rely on this location being fixed.
- * The first dword of the naca is required by iSeries LPAR to
- * point to itVpdAreas. On pSeries native, this value is not used.
- */
-. = NACA_PHYS_ADDR
-.globl __end_interrupts
-__end_interrupts:
-#ifdef CONFIG_PPC_ISERIES
-.globl naca
-naca:
-.llong itVpdAreas
-.llong 0 /* xRamDisk */
-.llong 0 /* xRamDiskSize */
+/*
+ * Vectors for the FWNMI option. Share common code.
+ */
+.globl system_reset_fwnmi
+system_reset_fwnmi:
+HMT_MEDIUM
+mtspr SPRG1,r13 /* save r13 */
+RUNLATCH_ON(r13)
+EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
 
-. = 0x6100
+.globl machine_check_fwnmi
+machine_check_fwnmi:
+HMT_MEDIUM
+mtspr SPRG1,r13 /* save r13 */
+RUNLATCH_ON(r13)
+EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
 
+#ifdef CONFIG_PPC_ISERIES
 /*** ISeries-LPAR interrupt handlers ***/
 
 STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC)
@@ -626,9 +622,7 @@ system_reset_iSeries:
 
 cmpwi 0,r23,0
 beq iSeries_secondary_smp_loop /* Loop until told to go */
-#ifdef SECONDARY_PROCESSORS
 bne .__secondary_start /* Loop until told to go */
-#endif
 iSeries_secondary_smp_loop:
 /* Let the Hypervisor know we are alive */
 /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
@@ -671,51 +665,8 @@ hardware_interrupt_iSeries_masked:
 ld r13,PACA_EXGEN+EX_R13(r13)
 rfid
 b . /* prevent speculative execution */
-#endif
-
-/*
- * Data area reserved for FWNMI option.
- */
-.= 0x7000
-.globl fwnmi_data_area
-fwnmi_data_area:
-
-#ifdef CONFIG_PPC_ISERIES
-. = LPARMAP_PHYS
-#include "lparmap.s"
 #endif /* CONFIG_PPC_ISERIES */
 
-/*
- * Vectors for the FWNMI option. Share common code.
- */
-. = 0x8000
-.globl system_reset_fwnmi
-system_reset_fwnmi:
-HMT_MEDIUM
-mtspr SPRG1,r13 /* save r13 */
-RUNLATCH_ON(r13)
-EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
-.globl machine_check_fwnmi
-machine_check_fwnmi:
-HMT_MEDIUM
-mtspr SPRG1,r13 /* save r13 */
-RUNLATCH_ON(r13)
-EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
-
-/*
- * Space for the initial segment table
- * For LPAR, the hypervisor must fill in at least one entry
- * before we get control (with relocate on)
- */
-. = STAB0_PHYS_ADDR
-.globl __start_stab
-__start_stab:
-
-. = (STAB0_PHYS_ADDR + PAGE_SIZE)
-.globl __end_stab
-__end_stab:
-
-
 /*** Common interrupt handlers ***/
 
 STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
@@ -752,8 +703,8 @@ machine_check_common:
  * R9 contains the saved CR, r13 points to the paca,
  * r10 contains the (bad) kernel stack pointer,
  * r11 and r12 contain the saved SRR0 and SRR1.
- * We switch to using the paca guard page as an emergency stack,
- * save the registers there, and call kernel_bad_stack(), which panics.
+ * We switch to using an emergency stack, save the registers there,
+ * and call kernel_bad_stack(), which panics.
  */
 bad_stack:
 ld r1,PACAEMERGSP(r13)
@@ -906,6 +857,62 @@ fp_unavailable_common:
 bl .kernel_fp_unavailable_exception
 BUG_OPCODE
 
+/*
+ * load_up_fpu(unused, unused, tsk)
+ * Disable FP for the task which had the FPU previously,
+ * and save its floating-point registers in its thread_struct.
+ * Enables the FPU for use in the kernel on return.
+ * On SMP we know the fpu is free, since we give it up every
+ * switch (ie, no lazy save of the FP registers).
+ * On entry: r13 == 'current' && last_task_used_math != 'current'
+ */
+_STATIC(load_up_fpu)
+mfmsr r5 /* grab the current MSR */
+ori r5,r5,MSR_FP
+mtmsrd r5 /* enable use of fpu now */
+isync
+/*
+ * For SMP, we don't do lazy FPU switching because it just gets too
+ * horrendously complex, especially when a task switches from one CPU
+ * to another. Instead we call giveup_fpu in switch_to.
+ *
+ */
+#ifndef CONFIG_SMP
+ld r3,last_task_used_math@got(r2)
+ld r4,0(r3)
+cmpdi 0,r4,0
+beq 1f
+/* Save FP state to last_task_used_math's THREAD struct */
+addi r4,r4,THREAD
+SAVE_32FPRS(0, r4)
+mffs fr0
+stfd fr0,THREAD_FPSCR(r4)
+/* Disable FP for last_task_used_math */
+ld r5,PT_REGS(r4)
+ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+li r6,MSR_FP|MSR_FE0|MSR_FE1
+andc r4,r4,r6
+std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#endif /* CONFIG_SMP */
+/* enable use of FP after return */
+ld r4,PACACURRENT(r13)
+addi r5,r4,THREAD /* Get THREAD */
+ld r4,THREAD_FPEXC_MODE(r5)
+ori r12,r12,MSR_FP
+or r12,r12,r4
+std r12,_MSR(r1)
+lfd fr0,THREAD_FPSCR(r5)
+mtfsf 0xff,fr0
+REST_32FPRS(0, r5)
+#ifndef CONFIG_SMP
+/* Update last_task_used_math to 'current' */
+subi r4,r5,THREAD /* Back to 'current' */
+std r4,0(r3)
+#endif /* CONFIG_SMP */
+/* restore registers and return */
+b fast_exception_return
+
 .align 7
 .globl altivec_unavailable_common
 altivec_unavailable_common:
@@ -921,6 +928,80 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 bl .altivec_unavailable_exception
 b .ret_from_except
 
+#ifdef CONFIG_ALTIVEC
+/*
+ * load_up_altivec(unused, unused, tsk)
+ * Disable VMX for the task which had it previously,
+ * and save its vector registers in its thread_struct.
+ * Enables the VMX for use in the kernel on return.
+ * On SMP we know the VMX is free, since we give it up every
+ * switch (ie, no lazy save of the vector registers).
+ * On entry: r13 == 'current' && last_task_used_altivec != 'current'
+ */
+_STATIC(load_up_altivec)
+mfmsr r5 /* grab the current MSR */
+oris r5,r5,MSR_VEC@h
+mtmsrd r5 /* enable use of VMX now */
+isync
+
+/*
+ * For SMP, we don't do lazy VMX switching because it just gets too
+ * horrendously complex, especially when a task switches from one CPU
+ * to another. Instead we call giveup_altvec in switch_to.
+ * VRSAVE isn't dealt with here, that is done in the normal context
+ * switch code. Note that we could rely on vrsave value to eventually
+ * avoid saving all of the VREGs here...
+ */
+#ifndef CONFIG_SMP
+ld r3,last_task_used_altivec@got(r2)
+ld r4,0(r3)
+cmpdi 0,r4,0
+beq 1f
+/* Save VMX state to last_task_used_altivec's THREAD struct */
+addi r4,r4,THREAD
+SAVE_32VRS(0,r5,r4)
+mfvscr vr0
+li r10,THREAD_VSCR
+stvx vr0,r10,r4
+/* Disable VMX for last_task_used_altivec */
+ld r5,PT_REGS(r4)
+ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+lis r6,MSR_VEC@h
+andc r4,r4,r6
+std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#endif /* CONFIG_SMP */
+/* Hack: if we get an altivec unavailable trap with VRSAVE
+ * set to all zeros, we assume this is a broken application
+ * that fails to set it properly, and thus we switch it to
+ * all 1's
+ */
+mfspr r4,SPRN_VRSAVE
+cmpdi 0,r4,0
+bne+ 1f
+li r4,-1
+mtspr SPRN_VRSAVE,r4
+1:
+/* enable use of VMX after return */
+ld r4,PACACURRENT(r13)
+addi r5,r4,THREAD /* Get THREAD */
+oris r12,r12,MSR_VEC@h
+std r12,_MSR(r1)
+li r4,1
+li r10,THREAD_VSCR
+stw r4,THREAD_USED_VR(r5)
+lvx vr0,r10,r5
+mtvscr vr0
+REST_32VRS(0,r4,r5)
+#ifndef CONFIG_SMP
+/* Update last_task_used_math to 'current' */
+subi r4,r5,THREAD /* Back to 'current' */
+std r4,0(r3)
+#endif /* CONFIG_SMP */
+/* restore registers and return */
+b fast_exception_return
+#endif /* CONFIG_ALTIVEC */
+
 /*
  * Hash table stuff
  */
@@ -1167,6 +1248,42 @@ unrecov_slb:
 bl .unrecoverable_exception
 b 1b
 
+/*
+ * Space for CPU0's segment table.
+ *
+ * On iSeries, the hypervisor must fill in at least one entry before
+ * we get control (with relocate on). The address is give to the hv
+ * as a page number (see xLparMap in LparData.c), so this must be at a
+ * fixed address (the linker can't compute (u64)&initial_stab >>
+ * PAGE_SHIFT).
+ */
+. = STAB0_PHYS_ADDR /* 0x6000 */
+.globl initial_stab
+initial_stab:
+.space 4096
+
+/*
+ * Data area reserved for FWNMI option.
+ * This address (0x7000) is fixed by the RPA.
+ */
+.= 0x7000
+.globl fwnmi_data_area
+fwnmi_data_area:
+
+/* iSeries does not use the FWNMI stuff, so it is safe to put
+ * this here, even if we later allow kernels that will boot on
+ * both pSeries and iSeries */
+#ifdef CONFIG_PPC_ISERIES
+. = LPARMAP_PHYS
+#include "lparmap.s"
+/*
+ * This ".text" is here for old compilers that generate a trailing
+ * .note section when compiling .c files to .s
+ */
+.text
+#endif /* CONFIG_PPC_ISERIES */
+
+. = 0x8000
 
 /*
  * On pSeries, secondary processors spin in the following code.
@@ -1200,7 +1317,7 @@ _GLOBAL(pSeries_secondary_smp_init)
 b .kexec_wait /* next kernel might do better */
 
 2: mtspr SPRG3,r13 /* Save vaddr of paca in SPRG3 */
-/* From now on, r24 is expected to be logica cpuid */
+/* From now on, r24 is expected to be logical cpuid */
 mr r24,r5
 3: HMT_LOW
 lbz r23,PACAPROCSTART(r13) /* Test if this processor should */
@@ -1213,10 +1330,8 @@ _GLOBAL(pSeries_secondary_smp_init)
 
 cmpwi 0,r23,0
 #ifdef CONFIG_SMP
-#ifdef SECONDARY_PROCESSORS
 bne .__secondary_start
 #endif
-#endif
 b 3b /* Loop until told to go */
 
 #ifdef CONFIG_PPC_ISERIES
@@ -1430,228 +1545,6 @@ _GLOBAL(copy_and_flush)
 .align 8
 copy_to_here:
 
-/*
- * load_up_fpu(unused, unused, tsk)
- * Disable FP for the task which had the FPU previously,
- * and save its floating-point registers in its thread_struct.
- * Enables the FPU for use in the kernel on return.
- * On SMP we know the fpu is free, since we give it up every
- * switch (ie, no lazy save of the FP registers).
- * On entry: r13 == 'current' && last_task_used_math != 'current'
- */
-_STATIC(load_up_fpu)
-mfmsr r5 /* grab the current MSR */
-ori r5,r5,MSR_FP
-mtmsrd r5 /* enable use of fpu now */
-isync
-/*
- * For SMP, we don't do lazy FPU switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another. Instead we call giveup_fpu in switch_to.
- *
- */
-#ifndef CONFIG_SMP
-ld r3,last_task_used_math@got(r2)
-ld r4,0(r3)
-cmpdi 0,r4,0
-beq 1f
-/* Save FP state to last_task_used_math's THREAD struct */
-addi r4,r4,THREAD
-SAVE_32FPRS(0, r4)
-mffs fr0
-stfd fr0,THREAD_FPSCR(r4)
-/* Disable FP for last_task_used_math */
-ld r5,PT_REGS(r4)
-ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-li r6,MSR_FP|MSR_FE0|MSR_FE1
-andc r4,r4,r6
-std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
-/* enable use of FP after return */
-ld r4,PACACURRENT(r13)
-addi r5,r4,THREAD /* Get THREAD */
-ld r4,THREAD_FPEXC_MODE(r5)
-ori r12,r12,MSR_FP
-or r12,r12,r4
-std r12,_MSR(r1)
-lfd fr0,THREAD_FPSCR(r5)
-mtfsf 0xff,fr0
-REST_32FPRS(0, r5)
-#ifndef CONFIG_SMP
-/* Update last_task_used_math to 'current' */
-subi r4,r5,THREAD /* Back to 'current' */
-std r4,0(r3)
-#endif /* CONFIG_SMP */
-/* restore registers and return */
-b fast_exception_return
-
-/*
- * disable_kernel_fp()
- * Disable the FPU.
- */
-_GLOBAL(disable_kernel_fp)
-mfmsr r3
-rldicl r0,r3,(63-MSR_FP_LG),1
-rldicl r3,r0,(MSR_FP_LG+1),0
-mtmsrd r3 /* disable use of fpu now */
-isync
-blr
-
-/*
- * giveup_fpu(tsk)
- * Disable FP for the task given as the argument,
- * and save the floating-point registers in its thread_struct.
- * Enables the FPU for use in the kernel on return.
- */
-_GLOBAL(giveup_fpu)
-mfmsr r5
-ori r5,r5,MSR_FP
-mtmsrd r5 /* enable use of fpu now */
-isync
-cmpdi 0,r3,0
-beqlr- /* if no previous owner, done */
-addi r3,r3,THREAD /* want THREAD of task */
-ld r5,PT_REGS(r3)
-cmpdi 0,r5,0
-SAVE_32FPRS(0, r3)
-mffs fr0
-stfd fr0,THREAD_FPSCR(r3)
-beq 1f
-ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-li r3,MSR_FP|MSR_FE0|MSR_FE1
-andc r4,r4,r3 /* disable FP for previous task */
-std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#ifndef CONFIG_SMP
-li r5,0
-ld r4,last_task_used_math@got(r2)
-std r5,0(r4)
-#endif /* CONFIG_SMP */
-blr
-
-
-#ifdef CONFIG_ALTIVEC
-
-/*
- * load_up_altivec(unused, unused, tsk)
- * Disable VMX for the task which had it previously,
- * and save its vector registers in its thread_struct.
- * Enables the VMX for use in the kernel on return.
- * On SMP we know the VMX is free, since we give it up every
- * switch (ie, no lazy save of the vector registers).
- * On entry: r13 == 'current' && last_task_used_altivec != 'current'
- */
-_STATIC(load_up_altivec)
-mfmsr r5 /* grab the current MSR */
-oris r5,r5,MSR_VEC@h
-mtmsrd r5 /* enable use of VMX now */
-isync
-
-/*
- * For SMP, we don't do lazy VMX switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another. Instead we call giveup_altvec in switch_to.
- * VRSAVE isn't dealt with here, that is done in the normal context
- * switch code. Note that we could rely on vrsave value to eventually
- * avoid saving all of the VREGs here...
- */
-#ifndef CONFIG_SMP
-ld r3,last_task_used_altivec@got(r2)
-ld r4,0(r3)
-cmpdi 0,r4,0
-beq 1f
-/* Save VMX state to last_task_used_altivec's THREAD struct */
-addi r4,r4,THREAD
-SAVE_32VRS(0,r5,r4)
-mfvscr vr0
-li r10,THREAD_VSCR
-stvx vr0,r10,r4
-/* Disable VMX for last_task_used_altivec */
-ld r5,PT_REGS(r4)
-ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-lis r6,MSR_VEC@h
-andc r4,r4,r6
-std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
-/* Hack: if we get an altivec unavailable trap with VRSAVE
- * set to all zeros, we assume this is a broken application
- * that fails to set it properly, and thus we switch it to
- * all 1's
- */
-mfspr r4,SPRN_VRSAVE
-cmpdi 0,r4,0
-bne+ 1f
-li r4,-1
-mtspr SPRN_VRSAVE,r4
-1:
-/* enable use of VMX after return */
-ld r4,PACACURRENT(r13)
-addi r5,r4,THREAD /* Get THREAD */
-oris r12,r12,MSR_VEC@h
-std r12,_MSR(r1)
-li r4,1
-li r10,THREAD_VSCR
-stw r4,THREAD_USED_VR(r5)
-lvx vr0,r10,r5
-mtvscr vr0
-REST_32VRS(0,r4,r5)
-#ifndef CONFIG_SMP
-/* Update last_task_used_math to 'current' */
-subi r4,r5,THREAD /* Back to 'current' */
-std r4,0(r3)
-#endif /* CONFIG_SMP */
-/* restore registers and return */
-b fast_exception_return
-
-/*
- * disable_kernel_altivec()
- * Disable the VMX.
- */
-_GLOBAL(disable_kernel_altivec)
-mfmsr r3
-rldicl r0,r3,(63-MSR_VEC_LG),1
-rldicl r3,r0,(MSR_VEC_LG+1),0
-mtmsrd r3 /* disable use of VMX now */
-isync
-blr
-
-/*
- * giveup_altivec(tsk)
- * Disable VMX for the task given as the argument,
- * and save the vector registers in its thread_struct.
- * Enables the VMX for use in the kernel on return.
- */
-_GLOBAL(giveup_altivec)
-mfmsr r5
-oris r5,r5,MSR_VEC@h
-mtmsrd r5 /* enable use of VMX now */
-isync
-cmpdi 0,r3,0
-beqlr- /* if no previous owner, done */
-addi r3,r3,THREAD /* want THREAD of task */
-ld r5,PT_REGS(r3)
-cmpdi 0,r5,0
-SAVE_32VRS(0,r4,r3)
-mfvscr vr0
-li r4,THREAD_VSCR
-stvx vr0,r4,r3
-beq 1f
-ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-lis r3,MSR_VEC@h
-andc r4,r4,r3 /* disable FP for previous task */
-std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#ifndef CONFIG_SMP
-li r5,0
-ld r4,last_task_used_altivec@got(r2)
-std r5,0(r4)
-#endif /* CONFIG_SMP */
-blr
-
-#endif /* CONFIG_ALTIVEC */
-
 #ifdef CONFIG_SMP
 #ifdef CONFIG_PPC_PMAC
 /*
@@ -2002,9 +1895,6 @@ _STATIC(start_here_common)
 
 bl .start_kernel
 
-_GLOBAL(__setup_cpu_power3)
-blr
-
 _GLOBAL(hmt_init)
 #ifdef CONFIG_HMT
 LOADADDR(r5, hmt_thread_data)
@@ -2095,20 +1985,19 @@ _GLOBAL(smp_release_cpus)
 
 /*
  * We put a few things here that have to be page-aligned.
- * This stuff goes at the beginning of the data segment,
- * which is page-aligned.
+ * This stuff goes at the beginning of the bss, which is page-aligned.
  */
-.data
+.section ".bss"
+
 .align 12
-.globl sdata
-sdata:
+
 .globl empty_zero_page
 empty_zero_page:
-.space 4096
+.space PAGE_SIZE
 
 .globl swapper_pg_dir
 swapper_pg_dir:
-.space 4096
+.space PAGE_SIZE
 
 /*
  * This space gets a copy of optional info passed to us by the bootstrap