author     David Gibson <david@gibson.dropbear.id.au>   2005-08-19 00:52:31 -0400
committer  Paul Mackerras <paulus@samba.org>            2005-08-28 20:53:33 -0400
commit     ec465515eeb662f66725c8c483a46b6bdd9bdd48
tree       bde2ec1fe896a8c0f2b71fec6ec748805540961e
parent     2e2446ea0758cd57dd065962d9544e3f4d44ea2b
[PATCH] Move iSeries and common vectors into unused space in head.S

In the ppc64 kernel's head.S there is currently quite a lot of unused space
between the NACA (at fixed address 0x4000) and the fwnmi data area (at fixed
address 0x7000).  This patch moves various exception vectors and support code
into this region to use the wasted space.

The functions load_up_fpu and load_up_altivec are moved down as well, since
they are essentially continuations of the fp_unavailable_common and
altivec_unavailable_common vectors, respectively.  Likewise, the fwnmi vectors
themselves are moved down into this area: while the location of the fwnmi data
area is fixed by the RPA, the vectors themselves can be anywhere sufficiently
low.

Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/ppc64/kernel/head.S')
 -rw-r--r--  arch/ppc64/kernel/head.S | 353
 1 file changed, 174 insertions(+), 179 deletions(-)
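Illustrative sketch (not part of the patch; labels are invented): the packing the commit message describes rests on two GNU assembler facilities that head.S already uses heavily, setting the location counter to an absolute offset with ". = addr" and padding to a power-of-two boundary with ".align". A minimal stand-alone fragment in the same style, assuming a powerpc64 assembler:

        /* Illustrative only: invented labels, not kernel code. */
        .section ".text"

        . = 0x3000                  /* jump the location counter to a fixed offset */
toy_vector_a:
        b       toy_vector_a        /* placeholder handler body */

        .align  7                   /* the next stub only needs a 128-byte (2^7) boundary,
                                     * not a reserved origin of its own */
toy_vector_b:
        b       toy_vector_b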
diff --git a/arch/ppc64/kernel/head.S b/arch/ppc64/kernel/head.S
index 13c03648a602..eb54f0548b01 100644
--- a/arch/ppc64/kernel/head.S
+++ b/arch/ppc64/kernel/head.S
@@ -52,9 +52,8 @@
  * We layout physical memory as follows:
  * 0x0000 - 0x00ff : Secondary processor spin code
  * 0x0100 - 0x2fff : pSeries Interrupt prologs
- * 0x3000 - 0x3fff : Interrupt support
- * 0x4000 - 0x4fff : NACA
- * 0x6000          : iSeries and common interrupt prologs
+ * 0x3000 - 0x6fff : interrupt support, iSeries and common interrupt prologs
+ * 0x7000 - 0x7fff : FWNMI data area
  * 0x9000 - 0x9fff : Initial segment table
  */
 
@@ -501,17 +500,35 @@ system_call_pSeries:
        STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
        STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
 
+       . = 0x3000
+
+/*** pSeries interrupt support ***/
+
        /* moved from 0xf00 */
-       STD_EXCEPTION_PSERIES(0x3000, performance_monitor)
+       STD_EXCEPTION_PSERIES(., performance_monitor)
 
-       . = 0x3100
+       .align  7
 _GLOBAL(do_stab_bolted_pSeries)
        mtcrf   0x80,r12
        mfspr   r12,SPRG2
        EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
 
+/*
+ * Vectors for the FWNMI option.  Share common code.
+ */
+       .globl system_reset_fwnmi
+system_reset_fwnmi:
+       HMT_MEDIUM
+       mtspr   SPRG1,r13               /* save r13 */
+       RUNLATCH_ON(r13)
+       EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
 
-       . = 0x6100
+       .globl machine_check_fwnmi
+machine_check_fwnmi:
+       HMT_MEDIUM
+       mtspr   SPRG1,r13               /* save r13 */
+       RUNLATCH_ON(r13)
+       EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
 
 #ifdef CONFIG_PPC_ISERIES
 /*** ISeries-LPAR interrupt handlers ***/
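Two small changes in the hunk above are easy to miss: STD_EXCEPTION_PSERIES is now given "." (the current location counter) instead of a hard-coded 0x3000, and the fixed ". = 0x3100" origin before do_stab_bolted_pSeries becomes a plain ".align 7". The toy macro below is an invented stand-in, not the real STD_EXCEPTION_PSERIES; assuming the real macro sets the location counter from its first argument, as the hard-coded callers earlier in the file suggest, passing "." makes that assignment a no-op, so stubs can simply be packed back to back.

        /* Illustrative only: toy_vector stands in for STD_EXCEPTION_PSERIES. */
        .macro  toy_vector addr, name
        . = \addr                   /* when the caller passes ".", this is a no-op */
        .globl  \name\()_toy
\name\()_toy:
        nop                         /* the real stub saves state and branches to common code */
        .endm

        . = 0x3000
        toy_vector ., performance_monitor   /* lands exactly at 0x3000 */
        .align  7
        toy_vector ., do_stab_bolted        /* lands on the next 128-byte boundary, wherever that is */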
@@ -656,51 +673,8 @@ hardware_interrupt_iSeries_masked:
        ld      r13,PACA_EXGEN+EX_R13(r13)
        rfid
        b       .       /* prevent speculative execution */
-#endif
-
-/*
- * Data area reserved for FWNMI option.
- */
-       .= 0x7000
-       .globl fwnmi_data_area
-fwnmi_data_area:
-
-#ifdef CONFIG_PPC_ISERIES
-       . = LPARMAP_PHYS
-#include "lparmap.s"
 #endif /* CONFIG_PPC_ISERIES */
 
-/*
- * Vectors for the FWNMI option.  Share common code.
- */
-       . = 0x8000
-       .globl system_reset_fwnmi
-system_reset_fwnmi:
-       HMT_MEDIUM
-       mtspr   SPRG1,r13               /* save r13 */
-       RUNLATCH_ON(r13)
-       EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
-       .globl machine_check_fwnmi
-machine_check_fwnmi:
-       HMT_MEDIUM
-       mtspr   SPRG1,r13               /* save r13 */
-       RUNLATCH_ON(r13)
-       EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
-
-       /*
-        * Space for the initial segment table
-        * For LPAR, the hypervisor must fill in at least one entry
-        * before we get control (with relocate on)
-        */
-       . = STAB0_PHYS_ADDR
-       .globl __start_stab
-__start_stab:
-
-       . = (STAB0_PHYS_ADDR + PAGE_SIZE)
-       .globl __end_stab
-__end_stab:
-
-
 /*** Common interrupt handlers ***/
 
        STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
@@ -891,6 +865,62 @@ fp_unavailable_common:
        bl      .kernel_fp_unavailable_exception
        BUG_OPCODE
 
+/*
+ * load_up_fpu(unused, unused, tsk)
+ * Disable FP for the task which had the FPU previously,
+ * and save its floating-point registers in its thread_struct.
+ * Enables the FPU for use in the kernel on return.
+ * On SMP we know the fpu is free, since we give it up every
+ * switch (ie, no lazy save of the FP registers).
+ * On entry: r13 == 'current' && last_task_used_math != 'current'
+ */
+_STATIC(load_up_fpu)
+       mfmsr   r5                      /* grab the current MSR */
+       ori     r5,r5,MSR_FP
+       mtmsrd  r5                      /* enable use of fpu now */
+       isync
+/*
+ * For SMP, we don't do lazy FPU switching because it just gets too
+ * horrendously complex, especially when a task switches from one CPU
+ * to another.  Instead we call giveup_fpu in switch_to.
+ *
+ */
+#ifndef CONFIG_SMP
+       ld      r3,last_task_used_math@got(r2)
+       ld      r4,0(r3)
+       cmpdi   0,r4,0
+       beq     1f
+       /* Save FP state to last_task_used_math's THREAD struct */
+       addi    r4,r4,THREAD
+       SAVE_32FPRS(0, r4)
+       mffs    fr0
+       stfd    fr0,THREAD_FPSCR(r4)
+       /* Disable FP for last_task_used_math */
+       ld      r5,PT_REGS(r4)
+       ld      r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+       li      r6,MSR_FP|MSR_FE0|MSR_FE1
+       andc    r4,r4,r6
+       std     r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#endif /* CONFIG_SMP */
+       /* enable use of FP after return */
+       ld      r4,PACACURRENT(r13)
+       addi    r5,r4,THREAD            /* Get THREAD */
+       ld      r4,THREAD_FPEXC_MODE(r5)
+       ori     r12,r12,MSR_FP
+       or      r12,r12,r4
+       std     r12,_MSR(r1)
+       lfd     fr0,THREAD_FPSCR(r5)
+       mtfsf   0xff,fr0
+       REST_32FPRS(0, r5)
+#ifndef CONFIG_SMP
+       /* Update last_task_used_math to 'current' */
+       subi    r4,r5,THREAD            /* Back to 'current' */
+       std     r4,0(r3)
+#endif /* CONFIG_SMP */
+       /* restore registers and return */
+       b       fast_exception_return
+
        .align  7
        .globl altivec_unavailable_common
 altivec_unavailable_common:
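The commit message calls load_up_fpu essentially a continuation of fp_unavailable_common, which is why the patch parks it immediately after that vector's common code instead of leaving it near the end of the file. The fragment below is an illustrative, stripped-down sketch (invented labels, not the kernel's code) of that stub-plus-continuation shape, keeping the detail the real code preserves: the FPU is switched on in the MSR, with an isync, before any floating-point state is touched.

toy_fp_unavailable:
        /* ...state saving would happen here in the real stub... */
        b       toy_load_up_fpu     /* branch to the continuation kept right next door */

toy_load_up_fpu:
        mfmsr   r5
        ori     r5,r5,0x2000        /* 0x2000 is MSR_FP; spelled as a literal only for this sketch */
        mtmsrd  r5                  /* FPU on before any lfd/stfd/mffs */
        isync
        blr                         /* the real code restores FP state and exits via fast_exception_return */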
@@ -906,6 +936,80 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
        bl      .altivec_unavailable_exception
        b       .ret_from_except
 
+#ifdef CONFIG_ALTIVEC
+/*
+ * load_up_altivec(unused, unused, tsk)
+ * Disable VMX for the task which had it previously,
+ * and save its vector registers in its thread_struct.
+ * Enables the VMX for use in the kernel on return.
+ * On SMP we know the VMX is free, since we give it up every
+ * switch (ie, no lazy save of the vector registers).
+ * On entry: r13 == 'current' && last_task_used_altivec != 'current'
+ */
+_STATIC(load_up_altivec)
+       mfmsr   r5                      /* grab the current MSR */
+       oris    r5,r5,MSR_VEC@h
+       mtmsrd  r5                      /* enable use of VMX now */
+       isync
+
+/*
+ * For SMP, we don't do lazy VMX switching because it just gets too
+ * horrendously complex, especially when a task switches from one CPU
+ * to another.  Instead we call giveup_altvec in switch_to.
+ * VRSAVE isn't dealt with here, that is done in the normal context
+ * switch code. Note that we could rely on vrsave value to eventually
+ * avoid saving all of the VREGs here...
+ */
+#ifndef CONFIG_SMP
+       ld      r3,last_task_used_altivec@got(r2)
+       ld      r4,0(r3)
+       cmpdi   0,r4,0
+       beq     1f
+       /* Save VMX state to last_task_used_altivec's THREAD struct */
+       addi    r4,r4,THREAD
+       SAVE_32VRS(0,r5,r4)
+       mfvscr  vr0
+       li      r10,THREAD_VSCR
+       stvx    vr0,r10,r4
+       /* Disable VMX for last_task_used_altivec */
+       ld      r5,PT_REGS(r4)
+       ld      r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+       lis     r6,MSR_VEC@h
+       andc    r4,r4,r6
+       std     r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#endif /* CONFIG_SMP */
+       /* Hack: if we get an altivec unavailable trap with VRSAVE
+        * set to all zeros, we assume this is a broken application
+        * that fails to set it properly, and thus we switch it to
+        * all 1's
+        */
+       mfspr   r4,SPRN_VRSAVE
+       cmpdi   0,r4,0
+       bne+    1f
+       li      r4,-1
+       mtspr   SPRN_VRSAVE,r4
+1:
+       /* enable use of VMX after return */
+       ld      r4,PACACURRENT(r13)
+       addi    r5,r4,THREAD            /* Get THREAD */
+       oris    r12,r12,MSR_VEC@h
+       std     r12,_MSR(r1)
+       li      r4,1
+       li      r10,THREAD_VSCR
+       stw     r4,THREAD_USED_VR(r5)
+       lvx     vr0,r10,r5
+       mtvscr  vr0
+       REST_32VRS(0,r4,r5)
+#ifndef CONFIG_SMP
+       /* Update last_task_used_math to 'current' */
+       subi    r4,r5,THREAD            /* Back to 'current' */
+       std     r4,0(r3)
+#endif /* CONFIG_SMP */
+       /* restore registers and return */
+       b       fast_exception_return
+#endif /* CONFIG_ALTIVEC */
+
 /*
  * Hash table stuff
  */
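A small asymmetry between the two relocated routines is worth noting: load_up_fpu sets its MSR bit with a plain ori, while load_up_altivec needs oris and the @h suffix (and lis for its mask), because MSR_FP fits in a 16-bit immediate but MSR_VEC sits in the upper half of the low 32 bits of the MSR. An illustrative fragment with the constants spelled out as literals, which the kernel writes as MSR_FP and MSR_VEC@h:

        mfmsr   r5
        ori     r5,r5,0x2000        /* MSR_FP: fits in ori's 16-bit immediate */
        oris    r5,r5,0x0200        /* 0x0200 = (0x02000000 >> 16), i.e. MSR_VEC's high half */
        mtmsrd  r5
        isync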
@@ -1152,6 +1256,27 @@ unrecov_slb:
        bl      .unrecoverable_exception
        b       1b
 
+/*
+ * Data area reserved for FWNMI option.
+ * This address (0x7000) is fixed by the RPA.
+ */
+       .= 0x7000
+       .globl fwnmi_data_area
+fwnmi_data_area:
+       .space  PAGE_SIZE
+
+       /*
+        * Space for the initial segment table
+        * For LPAR, the hypervisor must fill in at least one entry
+        * before we get control (with relocate on)
+        */
+       . = STAB0_PHYS_ADDR
+       .globl __start_stab
+__start_stab:
+
+       . = (STAB0_PHYS_ADDR + PAGE_SIZE)
+       .globl __end_stab
+__end_stab:
 
 /*
  * On pSeries, secondary processors spin in the following code.
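One detail of the relocated data area above: the new fwnmi_data_area explicitly reserves its page with ".space PAGE_SIZE", whereas the old copy removed earlier in this patch was a bare label at 0x7000 that relied on the following ". = 0x8000" origin to skip past the page. An illustrative fragment (invented label) of the new style, assuming a 4K page:

        . = 0x7000
toy_fwnmi_data_area:
        .space  0x1000              /* reserve the page so following code lands after it, not inside it */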
@@ -1416,62 +1541,6 @@ _GLOBAL(copy_and_flush)
 copy_to_here:
 
 /*
- * load_up_fpu(unused, unused, tsk)
- * Disable FP for the task which had the FPU previously,
- * and save its floating-point registers in its thread_struct.
- * Enables the FPU for use in the kernel on return.
- * On SMP we know the fpu is free, since we give it up every
- * switch (ie, no lazy save of the FP registers).
- * On entry: r13 == 'current' && last_task_used_math != 'current'
- */
-_STATIC(load_up_fpu)
-       mfmsr   r5                      /* grab the current MSR */
-       ori     r5,r5,MSR_FP
-       mtmsrd  r5                      /* enable use of fpu now */
-       isync
-/*
- * For SMP, we don't do lazy FPU switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another.  Instead we call giveup_fpu in switch_to.
- *
- */
-#ifndef CONFIG_SMP
-       ld      r3,last_task_used_math@got(r2)
-       ld      r4,0(r3)
-       cmpdi   0,r4,0
-       beq     1f
-       /* Save FP state to last_task_used_math's THREAD struct */
-       addi    r4,r4,THREAD
-       SAVE_32FPRS(0, r4)
-       mffs    fr0
-       stfd    fr0,THREAD_FPSCR(r4)
-       /* Disable FP for last_task_used_math */
-       ld      r5,PT_REGS(r4)
-       ld      r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-       li      r6,MSR_FP|MSR_FE0|MSR_FE1
-       andc    r4,r4,r6
-       std     r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
-       /* enable use of FP after return */
-       ld      r4,PACACURRENT(r13)
-       addi    r5,r4,THREAD            /* Get THREAD */
-       ld      r4,THREAD_FPEXC_MODE(r5)
-       ori     r12,r12,MSR_FP
-       or      r12,r12,r4
-       std     r12,_MSR(r1)
-       lfd     fr0,THREAD_FPSCR(r5)
-       mtfsf   0xff,fr0
-       REST_32FPRS(0, r5)
-#ifndef CONFIG_SMP
-       /* Update last_task_used_math to 'current' */
-       subi    r4,r5,THREAD            /* Back to 'current' */
-       std     r4,0(r3)
-#endif /* CONFIG_SMP */
-       /* restore registers and return */
-       b       fast_exception_return
-
-/*
  * disable_kernel_fp()
  * Disable the FPU.
  */
@@ -1515,81 +1584,7 @@ _GLOBAL(giveup_fpu)
 #endif /* CONFIG_SMP */
        blr
 
-
 #ifdef CONFIG_ALTIVEC
-
-/*
- * load_up_altivec(unused, unused, tsk)
- * Disable VMX for the task which had it previously,
- * and save its vector registers in its thread_struct.
- * Enables the VMX for use in the kernel on return.
- * On SMP we know the VMX is free, since we give it up every
- * switch (ie, no lazy save of the vector registers).
- * On entry: r13 == 'current' && last_task_used_altivec != 'current'
- */
-_STATIC(load_up_altivec)
-       mfmsr   r5                      /* grab the current MSR */
-       oris    r5,r5,MSR_VEC@h
-       mtmsrd  r5                      /* enable use of VMX now */
-       isync
-
-/*
- * For SMP, we don't do lazy VMX switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another.  Instead we call giveup_altvec in switch_to.
- * VRSAVE isn't dealt with here, that is done in the normal context
- * switch code. Note that we could rely on vrsave value to eventually
- * avoid saving all of the VREGs here...
- */
-#ifndef CONFIG_SMP
-       ld      r3,last_task_used_altivec@got(r2)
-       ld      r4,0(r3)
-       cmpdi   0,r4,0
-       beq     1f
-       /* Save VMX state to last_task_used_altivec's THREAD struct */
-       addi    r4,r4,THREAD
-       SAVE_32VRS(0,r5,r4)
-       mfvscr  vr0
-       li      r10,THREAD_VSCR
-       stvx    vr0,r10,r4
-       /* Disable VMX for last_task_used_altivec */
-       ld      r5,PT_REGS(r4)
-       ld      r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-       lis     r6,MSR_VEC@h
-       andc    r4,r4,r6
-       std     r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
-       /* Hack: if we get an altivec unavailable trap with VRSAVE
-        * set to all zeros, we assume this is a broken application
-        * that fails to set it properly, and thus we switch it to
-        * all 1's
-        */
-       mfspr   r4,SPRN_VRSAVE
-       cmpdi   0,r4,0
-       bne+    1f
-       li      r4,-1
-       mtspr   SPRN_VRSAVE,r4
-1:
-       /* enable use of VMX after return */
-       ld      r4,PACACURRENT(r13)
-       addi    r5,r4,THREAD            /* Get THREAD */
-       oris    r12,r12,MSR_VEC@h
-       std     r12,_MSR(r1)
-       li      r4,1
-       li      r10,THREAD_VSCR
-       stw     r4,THREAD_USED_VR(r5)
-       lvx     vr0,r10,r5
-       mtvscr  vr0
-       REST_32VRS(0,r4,r5)
-#ifndef CONFIG_SMP
-       /* Update last_task_used_math to 'current' */
-       subi    r4,r5,THREAD            /* Back to 'current' */
-       std     r4,0(r3)
-#endif /* CONFIG_SMP */
-       /* restore registers and return */
-       b       fast_exception_return
-
 /*
  * disable_kernel_altivec()
  * Disable the VMX.