Diffstat (limited to 'arch')
-rw-r--r--   arch/ia64/kernel/ivt.S          |  84
-rw-r--r--   arch/ia64/kernel/minstate.h     |  46
-rw-r--r--   arch/ia64/kernel/patch.c        |  23
-rw-r--r--   arch/ia64/kernel/setup.c        |  11
-rw-r--r--   arch/ia64/kernel/vmlinux.lds.S  |   7
5 files changed, 125 insertions, 46 deletions
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index 6678c49daba3..80b44ea052d7 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -1076,48 +1076,6 @@ END(ia64_syscall_setup)
 	DBG_FAULT(15)
 	FAULT(15)
 
-	/*
-	 * Squatting in this space ...
-	 *
-	 * This special case dispatcher for illegal operation faults allows preserved
-	 * registers to be modified through a callback function (asm only) that is handed
-	 * back from the fault handler in r8. Up to three arguments can be passed to the
-	 * callback function by returning an aggregate with the callback as its first
-	 * element, followed by the arguments.
-	 */
-ENTRY(dispatch_illegal_op_fault)
-	.prologue
-	.body
-	SAVE_MIN_WITH_COVER
-	ssm psr.ic | PSR_DEFAULT_BITS
-	;;
-	srlz.i		// guarantee that interruption collection is on
-	;;
-(p15)	ssm psr.i	// restore psr.i
-	adds r3=8,r2	// set up second base pointer for SAVE_REST
-	;;
-	alloc r14=ar.pfs,0,0,1,0	// must be first in insn group
-	mov out0=ar.ec
-	;;
-	SAVE_REST
-	PT_REGS_UNWIND_INFO(0)
-	;;
-	br.call.sptk.many rp=ia64_illegal_op_fault
-.ret0:	;;
-	alloc r14=ar.pfs,0,0,3,0	// must be first in insn group
-	mov out0=r9
-	mov out1=r10
-	mov out2=r11
-	movl r15=ia64_leave_kernel
-	;;
-	mov rp=r15
-	mov b6=r8
-	;;
-	cmp.ne p6,p0=0,r8
-(p6)	br.call.dpnt.many b6=b6		// call returns to ia64_leave_kernel
-	br.sptk.many ia64_leave_kernel
-END(dispatch_illegal_op_fault)
-
 	.org ia64_ivt+0x4000
 /////////////////////////////////////////////////////////////////////////////////////////
 // 0x4000 Entry 16 (size 64 bundles) Reserved
@@ -1715,6 +1673,48 @@ END(ia32_interrupt)
 	DBG_FAULT(67)
 	FAULT(67)
 
+	/*
+	 * Squatting in this space ...
+	 *
+	 * This special case dispatcher for illegal operation faults allows preserved
+	 * registers to be modified through a callback function (asm only) that is handed
+	 * back from the fault handler in r8. Up to three arguments can be passed to the
+	 * callback function by returning an aggregate with the callback as its first
+	 * element, followed by the arguments.
+	 */
+ENTRY(dispatch_illegal_op_fault)
+	.prologue
+	.body
+	SAVE_MIN_WITH_COVER
+	ssm psr.ic | PSR_DEFAULT_BITS
+	;;
+	srlz.i		// guarantee that interruption collection is on
+	;;
+(p15)	ssm psr.i	// restore psr.i
+	adds r3=8,r2	// set up second base pointer for SAVE_REST
+	;;
+	alloc r14=ar.pfs,0,0,1,0	// must be first in insn group
+	mov out0=ar.ec
+	;;
+	SAVE_REST
+	PT_REGS_UNWIND_INFO(0)
+	;;
+	br.call.sptk.many rp=ia64_illegal_op_fault
+.ret0:	;;
+	alloc r14=ar.pfs,0,0,3,0	// must be first in insn group
+	mov out0=r9
+	mov out1=r10
+	mov out2=r11
+	movl r15=ia64_leave_kernel
+	;;
+	mov rp=r15
+	mov b6=r8
+	;;
+	cmp.ne p6,p0=0,r8
+(p6)	br.call.dpnt.many b6=b6		// call returns to ia64_leave_kernel
+	br.sptk.many ia64_leave_kernel
+END(dispatch_illegal_op_fault)
+
 #ifdef CONFIG_IA32_SUPPORT
 
 	/*
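The comment block relocated above only describes the callback contract informally: the fault handler hands back the callback address in r8 and its arguments in r9-r11, which the dispatcher copies into b6 and out0-out2. On the C side this corresponds to returning a small aggregate from ia64_illegal_op_fault(); a hedged sketch of what that aggregate presumably looks like (field names are illustrative, and the struct itself is not part of this diff):

	/* Sketch of the return aggregate consumed by dispatch_illegal_op_fault.
	 * The first field comes back in r8 (callback address, or 0 for none);
	 * the rest come back in r9-r11 and are forwarded as out0-out2. */
	struct illegal_op_return {
		unsigned long fkt;	/* asm-only callback (r8 -> b6) */
		unsigned long arg1;	/* r9  -> out0 */
		unsigned long arg2;	/* r10 -> out1 */
		unsigned long arg3;	/* r11 -> out2 */
	};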
diff --git a/arch/ia64/kernel/minstate.h b/arch/ia64/kernel/minstate.h
index 7c548ac52bbc..74b6d670aaef 100644
--- a/arch/ia64/kernel/minstate.h
+++ b/arch/ia64/kernel/minstate.h
@@ -15,6 +15,9 @@
 #define ACCOUNT_SYS_ENTER
 #endif
 
+	.section ".data.patch.rse", "a"
+	.previous
+
 /*
  * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
  * the minimum state necessary that allows us to turn psr.ic back
@@ -40,7 +43,7 @@
  * Note that psr.ic is NOT turned on by this macro. This is so that
  * we can pass interruption state as arguments to a handler.
  */
-#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA)					\
+#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA,WORKAROUND)				\
 	mov r16=IA64_KR(CURRENT);	/* M */					\
 	mov r27=ar.rsc;			/* M */					\
 	mov r20=r1;			/* A */					\
@@ -87,6 +90,7 @@
 	tbit.nz p15,p0=r29,IA64_PSR_I_BIT;					\
 	mov r29=b0								\
 	;;									\
+	WORKAROUND;								\
 	adds r16=PT(R8),r1;	/* initialize first base pointer */		\
 	adds r17=PT(R9),r1;	/* initialize second base pointer */		\
 (pKStk)	mov r18=r0;		/* make sure r18 isn't NaT */			\
@@ -206,6 +210,40 @@
 	st8 [r25]=r10;		/* ar.ssd */					\
 	;;
 
-#define SAVE_MIN_WITH_COVER	DO_SAVE_MIN(cover, mov r30=cr.ifs,)
-#define SAVE_MIN_WITH_COVER_R19	DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19)
-#define SAVE_MIN		DO_SAVE_MIN(     , mov r30=r0, )
+#define RSE_WORKAROUND				\
+(pUStk) extr.u	r17=r18,3,6;			\
+(pUStk)	sub	r16=r18,r22;			\
+[1:](pKStk)	br.cond.sptk.many 1f;		\
+	.xdata4 ".data.patch.rse",1b-.		\
+	;;					\
+	cmp.ge p6,p7 = 33,r17;			\
+	;;					\
+(p6)	mov	r17=0x310;			\
+(p7)	mov	r17=0x308;			\
+	;;					\
+	cmp.leu p1,p0=r16,r17;			\
+(p1)	br.cond.sptk.many 1f;			\
+	dep.z	r17=r26,0,62;			\
+	movl	r16=2f;				\
+	;;					\
+	mov	ar.pfs=r17;			\
+	dep	r27=r0,r27,16,14;		\
+	mov	b0=r16;				\
+	;;					\
+	br.ret.sptk	b0;			\
+	;;					\
+2:						\
+	mov	ar.rsc=r0			\
+	;;					\
+	flushrs;				\
+	;;					\
+	mov	ar.bspstore=r22			\
+	;;					\
+	mov	r18=ar.bsp;			\
+	;;					\
+1:						\
+	.pred.rel "mutex", pKStk, pUStk
+
+#define SAVE_MIN_WITH_COVER	DO_SAVE_MIN(cover, mov r30=cr.ifs, , RSE_WORKAROUND)
+#define SAVE_MIN_WITH_COVER_R19	DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19, RSE_WORKAROUND)
+#define SAVE_MIN		DO_SAVE_MIN(     , mov r30=r0, , )
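RSE_WORKAROUND is dense, so a rough C paraphrase may help; this is only a hedged reading of the assembly above. Treating r18 as ar.bsp after the switch to the kernel backing store and r22 as the kernel backing-store base is an assumption drawn from the surrounding DO_SAVE_MIN code, not something this hunk states:

	/* Hedged C paraphrase of RSE_WORKAROUND; for reading only, not an
	 * alternative implementation. */
	if (entered_from_user_space) {	/* pUStk; the (pKStk) tagged branch skips this */
		unsigned long slot  = (r18 >> 3) & 0x3f;	/* extr.u r17=r18,3,6 */
		unsigned long dirty = r18 - r22;		/* bytes pending on the kernel RBS (assumed) */
		unsigned long limit = (slot <= 33) ? 0x310 : 0x308;

		if (dirty > limit) {
			/* drain the RSE: br.ret through a crafted ar.pfs, then
			 * enforced-lazy mode (ar.rsc=0), flushrs, and re-load
			 * ar.bspstore from r22 before SAVE_MIN continues */
		}
		/* ia64_patch_rse() later makes the tagged branch unconditional on
		 * CPUs that do not need this, so the whole block is skipped there */
	}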
diff --git a/arch/ia64/kernel/patch.c b/arch/ia64/kernel/patch.c
index e0dca8743dbb..b83b2c516008 100644
--- a/arch/ia64/kernel/patch.c
+++ b/arch/ia64/kernel/patch.c
@@ -115,6 +115,29 @@ ia64_patch_vtop (unsigned long start, unsigned long end)
 	ia64_srlz_i();
 }
 
+/*
+ * Disable the RSE workaround by turning the conditional branch
+ * that we tagged in each place the workaround was used into an
+ * unconditional branch.
+ */
+void __init
+ia64_patch_rse (unsigned long start, unsigned long end)
+{
+	s32 *offp = (s32 *) start;
+	u64 ip, *b;
+
+	while (offp < (s32 *) end) {
+		ip = (u64) offp + *offp;
+
+		b = (u64 *)(ip & -16);
+		b[1] &= ~0xf800000L;
+		ia64_fc((void *) ip);
+		++offp;
+	}
+	ia64_sync_i();
+	ia64_srlz_i();
+}
+
 void __init
 ia64_patch_mckinley_e9 (unsigned long start, unsigned long end)
 {
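The offsets ia64_patch_rse() walks are the 32-bit self-relative entries emitted by the `.xdata4 ".data.patch.rse",1b-.` directive in RSE_WORKAROUND: each entry stores (tag address - entry address), so adding the entry's own address recovers the tagged bundle. A minimal sketch of that resolution step, under the assumption that `start` is the patch-list base passed in by setup_arch():

	/* Sketch: mapping one .data.patch.rse entry back to its tagged bundle.
	 * Illustrative helper, not code from this patch. */
	static u64 __init rse_patch_target(unsigned long start)
	{
		s32 *offp = (s32 *) start;
		u64 ip = (u64) offp + *offp;	/* self-relative offset -> address of the [1:] tag */

		return ip & -16;		/* 16-byte aligned bundle holding the br.cond */
	}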
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index e9596cd0cdab..f48a809c686d 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -560,6 +560,17 @@ setup_arch (char **cmdline_p)
 	/* process SAL system table: */
 	ia64_sal_init(__va(efi.sal_systab));
 
+#ifdef CONFIG_ITANIUM
+	ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
+#else
+	{
+		u64 num_phys_stacked;
+
+		if (ia64_pal_rse_info(&num_phys_stacked, 0) == 0 && num_phys_stacked > 96)
+			ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
+	}
+#endif
+
 #ifdef CONFIG_SMP
 	cpu_physical_id(0) = hard_smp_processor_id();
 #endif
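The hunk above decides at boot whether to keep the workaround: CONFIG_ITANIUM kernels patch it out unconditionally, and other kernels patch it out only when PAL reports more than 96 physical stacked registers. Folded into a single predicate, the logic reads roughly as follows (needs_rse_workaround() is an illustrative helper, not code from this patch):

	/* Hedged restatement of the setup_arch() logic above: the RSE
	 * workaround survives only when this returns non-zero. */
	static int __init needs_rse_workaround(void)
	{
	#ifdef CONFIG_ITANIUM
		return 0;			/* always patched out on these builds */
	#else
		u64 num_phys_stacked;

		if (ia64_pal_rse_info(&num_phys_stacked, 0) != 0)
			return 1;		/* PAL query failed: keep the workaround */
		return num_phys_stacked <= 96;
	#endif
	}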
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 80622acc95de..5929ab10a289 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -156,6 +156,13 @@ SECTIONS
 	  __end___vtop_patchlist = .;
 	}
 
+  .data.patch.rse : AT(ADDR(.data.patch.rse) - LOAD_OFFSET)
+	{
+	  __start___rse_patchlist = .;
+	  *(.data.patch.rse)
+	  __end___rse_patchlist = .;
+	}
+
   .data.patch.mckinley_e9 : AT(ADDR(.data.patch.mckinley_e9) - LOAD_OFFSET)
 	{
 	  __start___mckinley_e9_bundles = .;
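The new output section simply collects the .xdata4 words emitted at every RSE_WORKAROUND site and brackets them with the two symbols that setup_arch() hands to ia64_patch_rse(). The C-side declarations presumably follow the pattern of the neighboring vtop and mckinley_e9 patch lists; a hedged sketch of what this patch relies on (those headers sit outside the 'arch'-limited diffstat shown above):

	/* Assumed companion declarations; not part of the hunks shown here. */
	extern char __start___rse_patchlist[], __end___rse_patchlist[];	/* linker symbols */
	extern void ia64_patch_rse (unsigned long start, unsigned long end);	/* patcher from patch.c */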