-rw-r--r--  arch/powerpc/kernel/fpu.S      |  23
-rw-r--r--  include/asm-powerpc/ppc_asm.h  | 103
2 files changed, 126 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 8a2b3d62b9b8..15247fe171a8 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -24,6 +24,29 @@
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
 
+#ifdef CONFIG_VSX
+#define REST_32FPVSRS(n,c,base)			\
+BEGIN_FTR_SECTION				\
+	b	2f;				\
+END_FTR_SECTION_IFSET(CPU_FTR_VSX);		\
+	REST_32FPRS(n,base);			\
+	b	3f;				\
+2:	REST_32VSRS(n,c,base);			\
+3:
+
+#define SAVE_32FPVSRS(n,c,base)			\
+BEGIN_FTR_SECTION				\
+	b	2f;				\
+END_FTR_SECTION_IFSET(CPU_FTR_VSX);		\
+	SAVE_32FPRS(n,base);			\
+	b	3f;				\
+2:	SAVE_32VSRS(n,c,base);			\
+3:
+#else
+#define REST_32FPVSRS(n,b,base)	REST_32FPRS(n, base)
+#define SAVE_32FPVSRS(n,b,base)	SAVE_32FPRS(n, base)
+#endif
+
 /*
  * This task wants to use the FPU now.
  * On UP, disable FP for the task which had the FPU previously,
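The wrapper macros above pick between the FP-only and the VSX save/restore paths with the CPU feature-section machinery: the "b 2f" sits in a feature section that is kept only when CPU_FTR_VSX is set, so on VSX hardware control branches to label 2 and restores full 128-bit VSRs, while otherwise it falls through to the FP-only path. The selection happens via boot-time code patching, not a per-call test. Below is a minimal C sketch of the equivalent control flow; it is not kernel code, and cpu_has_vsx, rest_32fprs() and rest_32vsrs() are hypothetical stand-ins for the real macros.

	#include <stdbool.h>
	#include <stdio.h>

	static bool cpu_has_vsx;	/* stand-in for CPU_FTR_VSX */

	static void rest_32fprs(void)	/* FP-only path (fall-through case) */
	{
		puts("restore fpr0-fpr31 (64-bit each)");
	}

	static void rest_32vsrs(void)	/* VSX path (label 2: in the macro) */
	{
		puts("restore vsr0-vsr31 (128-bit each)");
	}

	static void rest_32fpvsrs(void)
	{
		/* The kernel patches the branch in at boot instead of
		 * testing a flag every time this runs. */
		if (cpu_has_vsx)
			rest_32vsrs();
		else
			rest_32fprs();
		/* label 3: both paths rejoin here */
	}

	int main(void)
	{
		cpu_has_vsx = true;	/* decided once at boot on real hardware */
		rest_32fpvsrs();
		return 0;
	}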
diff --git a/include/asm-powerpc/ppc_asm.h b/include/asm-powerpc/ppc_asm.h
index 8f0426f973c9..0966899d974b 100644
--- a/include/asm-powerpc/ppc_asm.h
+++ b/include/asm-powerpc/ppc_asm.h
@@ -74,6 +74,15 @@ END_FTR_SECTION_IFCLR(CPU_FTR_PURR);		\
 				REST_10GPRS(22, base)
 #endif
 
+/*
+ * Define what the VSX XX1-form instructions will look like, then add
+ * the 128-bit load/store instructions based on that.
+ */
+#define VSX_XX1(xs, ra, rb)	(((xs) & 0x1f) << 21 | ((ra) << 16) |	\
+				 ((rb) << 11) | (((xs) >> 5)))
+
+#define STXVD2X(xs, ra, rb)	.long (0x7c000798 | VSX_XX1((xs), (ra), (rb)))
+#define LXVD2X(xs, ra, rb)	.long (0x7c000698 | VSX_XX1((xs), (ra), (rb)))
 
 #define SAVE_2GPRS(n, base)	SAVE_GPR(n, base); SAVE_GPR(n+1, base)
 #define SAVE_4GPRS(n, base)	SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base)
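The load/store macros are emitted as raw .long words, presumably because assemblers of the time do not yet know the VSX mnemonics, so VSX_XX1 builds the XX1-form register fields by hand. The point of the split on xs is that VSX has 64 registers: the low 5 bits of the register number go in the usual 5-bit register field, and the sixth (high) bit lands in the least significant bit of the instruction word. A self-contained C mirror of the macros (illustrative only, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	/* Same arithmetic as the VSX_XX1()/STXVD2X()/LXVD2X() macros above. */
	static uint32_t vsx_xx1(unsigned xs, unsigned ra, unsigned rb)
	{
		return ((xs & 0x1f) << 21) | (ra << 16) | (rb << 11) | (xs >> 5);
	}

	static uint32_t stxvd2x(unsigned xs, unsigned ra, unsigned rb)
	{
		return 0x7c000798 | vsx_xx1(xs, ra, rb);
	}

	static uint32_t lxvd2x(unsigned xs, unsigned ra, unsigned rb)
	{
		return 0x7c000698 | vsx_xx1(xs, ra, rb);
	}

	int main(void)
	{
		/* vsr0 vs vsr32: identical fields except the word's low bit */
		printf("stxvd2x vsr0,r4,r5  -> 0x%08x\n", stxvd2x(0, 4, 5));
		printf("stxvd2x vsr32,r4,r5 -> 0x%08x\n", stxvd2x(32, 4, 5));
		printf("lxvd2x  vsr0,r4,r5  -> 0x%08x\n", lxvd2x(0, 4, 5));
		return 0;
	}

Here the first two lines print 0x7c042f98 and 0x7c042f99: the encodings for vsr0 and vsr32 differ only in bit 0, which is exactly the (xs) >> 5 term.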
@@ -110,6 +119,33 @@ END_FTR_SECTION_IFCLR(CPU_FTR_PURR);		\
 #define REST_16VRS(n,b,base)	REST_8VRS(n,b,base); REST_8VRS(n+8,b,base)
 #define REST_32VRS(n,b,base)	REST_16VRS(n,b,base); REST_16VRS(n+16,b,base)
 
+/* Save the lower 32 VSRs in the thread VSR region */
+#define SAVE_VSR(n,b,base)	li b,THREAD_VSR0+(16*(n)); STXVD2X(n,b,base)
+#define SAVE_2VSRS(n,b,base)	SAVE_VSR(n,b,base); SAVE_VSR(n+1,b,base)
+#define SAVE_4VSRS(n,b,base)	SAVE_2VSRS(n,b,base); SAVE_2VSRS(n+2,b,base)
+#define SAVE_8VSRS(n,b,base)	SAVE_4VSRS(n,b,base); SAVE_4VSRS(n+4,b,base)
+#define SAVE_16VSRS(n,b,base)	SAVE_8VSRS(n,b,base); SAVE_8VSRS(n+8,b,base)
+#define SAVE_32VSRS(n,b,base)	SAVE_16VSRS(n,b,base); SAVE_16VSRS(n+16,b,base)
+#define REST_VSR(n,b,base)	li b,THREAD_VSR0+(16*(n)); LXVD2X(n,b,base)
+#define REST_2VSRS(n,b,base)	REST_VSR(n,b,base); REST_VSR(n+1,b,base)
+#define REST_4VSRS(n,b,base)	REST_2VSRS(n,b,base); REST_2VSRS(n+2,b,base)
+#define REST_8VSRS(n,b,base)	REST_4VSRS(n,b,base); REST_4VSRS(n+4,b,base)
+#define REST_16VSRS(n,b,base)	REST_8VSRS(n,b,base); REST_8VSRS(n+8,b,base)
+#define REST_32VSRS(n,b,base)	REST_16VSRS(n,b,base); REST_16VSRS(n+16,b,base)
+/* Save the upper 32 VSRs (32-63) in the thread VMX/VR region (vr0-31) */
+#define SAVE_VSRU(n,b,base)	li b,THREAD_VR0+(16*(n)); STXVD2X(n+32,b,base)
+#define SAVE_2VSRSU(n,b,base)	SAVE_VSRU(n,b,base); SAVE_VSRU(n+1,b,base)
+#define SAVE_4VSRSU(n,b,base)	SAVE_2VSRSU(n,b,base); SAVE_2VSRSU(n+2,b,base)
+#define SAVE_8VSRSU(n,b,base)	SAVE_4VSRSU(n,b,base); SAVE_4VSRSU(n+4,b,base)
+#define SAVE_16VSRSU(n,b,base)	SAVE_8VSRSU(n,b,base); SAVE_8VSRSU(n+8,b,base)
+#define SAVE_32VSRSU(n,b,base)	SAVE_16VSRSU(n,b,base); SAVE_16VSRSU(n+16,b,base)
+#define REST_VSRU(n,b,base)	li b,THREAD_VR0+(16*(n)); LXVD2X(n+32,b,base)
+#define REST_2VSRSU(n,b,base)	REST_VSRU(n,b,base); REST_VSRU(n+1,b,base)
+#define REST_4VSRSU(n,b,base)	REST_2VSRSU(n,b,base); REST_2VSRSU(n+2,b,base)
+#define REST_8VSRSU(n,b,base)	REST_4VSRSU(n,b,base); REST_4VSRSU(n+4,b,base)
+#define REST_16VSRSU(n,b,base)	REST_8VSRSU(n,b,base); REST_8VSRSU(n+8,b,base)
+#define REST_32VSRSU(n,b,base)	REST_16VSRSU(n,b,base); REST_16VSRSU(n+16,b,base)
+
 #define SAVE_EVR(n,s,base)	evmergehi s,s,n; stw s,THREAD_EVR0+4*(n)(base)
 #define SAVE_2EVRS(n,s,base)	SAVE_EVR(n,s,base); SAVE_EVR(n+1,s,base)
 #define SAVE_4EVRS(n,s,base)	SAVE_2EVRS(n,s,base); SAVE_2EVRS(n+2,s,base)
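These families unroll by pairwise doubling, exactly like the GPR/FPR/VR macros earlier in this file: SAVE_32VSRS(0,b,base) expands to 32 SAVE_VSR() instances, each loading an offset into the scratch register b and issuing one indexed 16-byte store at base + b. The lower 32 VSRs go to the THREAD_VSR0 area, while VSRs 32-63, which architecturally overlay the VMX registers, go to the THREAD_VR0 area via the *VSRSU variants. A small C sketch of what one fully unrolled save looks like; the THREAD_VSR0 value is assumed for illustration, the real one comes from asm-offsets.h.

	#include <stdio.h>

	#define THREAD_VSR0 0x150	/* assumed value, for illustration only */

	int main(void)
	{
		/* One line per SAVE_VSR(n,b,base) that SAVE_32VSRS(0,b,base)
		 * expands to; the store's effective address is base + b. */
		for (unsigned n = 0; n < 32; n++)
			printf("li b,%#x; stxvd2x vsr%u,b,base\n",
			       THREAD_VSR0 + 16 * n, n);
		return 0;
	}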
@@ -540,6 +576,73 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
 #define vr30	30
 #define vr31	31
 
+/* VSX Registers (VSRs) */
+
+#define vsr0	0
+#define vsr1	1
+#define vsr2	2
+#define vsr3	3
+#define vsr4	4
+#define vsr5	5
+#define vsr6	6
+#define vsr7	7
+#define vsr8	8
+#define vsr9	9
+#define vsr10	10
+#define vsr11	11
+#define vsr12	12
+#define vsr13	13
+#define vsr14	14
+#define vsr15	15
+#define vsr16	16
+#define vsr17	17
+#define vsr18	18
+#define vsr19	19
+#define vsr20	20
+#define vsr21	21
+#define vsr22	22
+#define vsr23	23
+#define vsr24	24
+#define vsr25	25
+#define vsr26	26
+#define vsr27	27
+#define vsr28	28
+#define vsr29	29
+#define vsr30	30
+#define vsr31	31
+#define vsr32	32
+#define vsr33	33
+#define vsr34	34
+#define vsr35	35
+#define vsr36	36
+#define vsr37	37
+#define vsr38	38
+#define vsr39	39
+#define vsr40	40
+#define vsr41	41
+#define vsr42	42
+#define vsr43	43
+#define vsr44	44
+#define vsr45	45
+#define vsr46	46
+#define vsr47	47
+#define vsr48	48
+#define vsr49	49
+#define vsr50	50
+#define vsr51	51
+#define vsr52	52
+#define vsr53	53
+#define vsr54	54
+#define vsr55	55
+#define vsr56	56
+#define vsr57	57
+#define vsr58	58
+#define vsr59	59
+#define vsr60	60
+#define vsr61	61
+#define vsr62	62
+#define vsr63	63
+
 /* SPE Registers (EVPRs) */
 
 #define evr0	0