author		Ashok Raj <ashok.raj@intel.com>		2005-04-22 17:44:40 -0400
committer	Tony Luck <tony.luck@intel.com>		2005-04-22 17:44:40 -0400
commit		b8d8b883e6f029e99c35c88f853501740e322131 (patch)
tree		391f2ade8823149f217991eb02911bf3dacce050 /arch/ia64
parent		7130667107cd3ab9d6802b69bab63c7d22f20bd4 (diff)
[IA64] cpu hotplug: return offlined cpus to SAL
This patch is required to support cpu removal on IPF systems. The existing
code just fakes a real offline by keeping the cpu running the idle thread
and polling for a bit to reappear in cpu_state to get out of the idle loop.

For cpu-offline to work correctly, we need to pass control of this CPU back
to SAL so it can continue in boot-rendezvous mode. This gives SAL control
so that it does not pick this cpu as the monarch processor for global MCA
events, and in addition does not wait for this cpu to check in with SAL
for global MCA events. The handoff is implemented as documented in the SAL
specification, section 3.2.5.1 "OS_BOOT_RENDEZ to SAL return State".
Signed-off-by: Ashok Raj <ashok.raj@intel.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'arch/ia64')
-rw-r--r--	arch/ia64/kernel/head.S		| 280
-rw-r--r--	arch/ia64/kernel/mca_asm.S	|  88
-rw-r--r--	arch/ia64/kernel/process.c	|  22
-rw-r--r--	arch/ia64/kernel/smpboot.c	|  81
4 files changed, 361 insertions, 110 deletions
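For orientation before the full diff: the offline path this patch introduces
reduces to the following condensed C sketch of play_dead(), taken from the
process.c hunk below (all identifiers appear in the diff).

	/* Condensed from arch/ia64/kernel/process.c after this patch. */
	static inline void play_dead(void)
	{
		unsigned int this_cpu = smp_processor_id();

		__get_cpu_var(cpu_state) = CPU_DEAD;	/* ack the offline request */
		max_xtp();
		local_irq_disable();
		idle_task_exit();
		/*
		 * Hand this cpu back to SAL: ia64_jump_to_sal() (new in head.S)
		 * purges the TLB, restores the register state saved at boot by
		 * start_ap, and returns into SAL's boot-rendezvous loop.
		 */
		ia64_jump_to_sal(&sal_boot_rendez_state[this_cpu]);
		BUG();	/* point of no return */
	}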
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 105c7fec8c6d..0d535d65eea6 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -15,6 +15,8 @@
  * Copyright (C) 1999 Don Dugger <Don.Dugger@intel.com>
  * Copyright (C) 2002 Fenghua Yu <fenghua.yu@intel.com>
  *   -Optimize __ia64_save_fpu() and __ia64_load_fpu() for Itanium 2.
+ * Copyright (C) 2004 Ashok Raj <ashok.raj@intel.com>
+ *   Support for CPU Hotplug
  */

 #include <linux/config.h>
@@ -29,6 +31,134 @@
 #include <asm/processor.h>
 #include <asm/ptrace.h>
 #include <asm/system.h>
+#include <asm/mca_asm.h>
+
+#ifdef CONFIG_HOTPLUG_CPU
+#define SAL_PSR_BITS_TO_SET	\
+	(IA64_PSR_AC | IA64_PSR_BN | IA64_PSR_MFH | IA64_PSR_MFL)
+
+#define SAVE_FROM_REG(src, ptr, dest)	\
+	mov dest=src;;	\
+	st8 [ptr]=dest,0x08
+
+#define RESTORE_REG(reg, ptr, _tmp)	\
+	ld8 _tmp=[ptr],0x08;;	\
+	mov reg=_tmp
+
+#define SAVE_BREAK_REGS(ptr, _idx, _breg, _dest)\
+	mov ar.lc=IA64_NUM_DBG_REGS-1;;	\
+	mov _idx=0;;	\
+1:	\
+	SAVE_FROM_REG(_breg[_idx], ptr, _dest);;	\
+	add _idx=1,_idx;;	\
+	br.cloop.sptk.many 1b
+
+#define RESTORE_BREAK_REGS(ptr, _idx, _breg, _tmp, _lbl)\
+	mov ar.lc=IA64_NUM_DBG_REGS-1;;	\
+	mov _idx=0;;	\
+_lbl:	RESTORE_REG(_breg[_idx], ptr, _tmp);;	\
+	add _idx=1, _idx;;	\
+	br.cloop.sptk.many _lbl
+
+#define SAVE_ONE_RR(num, _reg, _tmp)	\
+	movl _tmp=(num<<61);;	\
+	mov _reg=rr[_tmp]
+
+#define SAVE_REGION_REGS(_tmp, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7)	\
+	SAVE_ONE_RR(0,_r0, _tmp);;	\
+	SAVE_ONE_RR(1,_r1, _tmp);;	\
+	SAVE_ONE_RR(2,_r2, _tmp);;	\
+	SAVE_ONE_RR(3,_r3, _tmp);;	\
+	SAVE_ONE_RR(4,_r4, _tmp);;	\
+	SAVE_ONE_RR(5,_r5, _tmp);;	\
+	SAVE_ONE_RR(6,_r6, _tmp);;	\
+	SAVE_ONE_RR(7,_r7, _tmp);;
+
+#define STORE_REGION_REGS(ptr, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7)	\
+	st8 [ptr]=_r0, 8;;	\
+	st8 [ptr]=_r1, 8;;	\
+	st8 [ptr]=_r2, 8;;	\
+	st8 [ptr]=_r3, 8;;	\
+	st8 [ptr]=_r4, 8;;	\
+	st8 [ptr]=_r5, 8;;	\
+	st8 [ptr]=_r6, 8;;	\
+	st8 [ptr]=_r7, 8;;
+
+#define RESTORE_REGION_REGS(ptr, _idx1, _idx2, _tmp)	\
+	mov ar.lc=0x08-1;;	\
+	movl _idx1=0x00;;	\
+RestRR:	\
+	dep.z _idx2=_idx1,61,3;;	\
+	ld8 _tmp=[ptr],8;;	\
+	mov rr[_idx2]=_tmp;;	\
+	srlz.d;;	\
+	add _idx1=1,_idx1;;	\
+	br.cloop.sptk.few RestRR
+
+/*
+ * Adjust region registers saved before starting to save
+ * break regs and rest of the states that need to be preserved.
+ */
+#define SAL_TO_OS_BOOT_HANDOFF_STATE_SAVE(_reg1,_reg2,_pred)	\
+	SAVE_FROM_REG(b0,_reg1,_reg2);;	\
+	SAVE_FROM_REG(b1,_reg1,_reg2);;	\
+	SAVE_FROM_REG(b2,_reg1,_reg2);;	\
+	SAVE_FROM_REG(b3,_reg1,_reg2);;	\
+	SAVE_FROM_REG(b4,_reg1,_reg2);;	\
+	SAVE_FROM_REG(b5,_reg1,_reg2);;	\
+	st8 [_reg1]=r1,0x08;;	\
+	st8 [_reg1]=r12,0x08;;	\
+	st8 [_reg1]=r13,0x08;;	\
+	SAVE_FROM_REG(ar.fpsr,_reg1,_reg2);;	\
+	SAVE_FROM_REG(ar.pfs,_reg1,_reg2);;	\
+	SAVE_FROM_REG(ar.rnat,_reg1,_reg2);;	\
+	SAVE_FROM_REG(ar.unat,_reg1,_reg2);;	\
+	SAVE_FROM_REG(ar.bspstore,_reg1,_reg2);;	\
+	SAVE_FROM_REG(cr.dcr,_reg1,_reg2);;	\
+	SAVE_FROM_REG(cr.iva,_reg1,_reg2);;	\
+	SAVE_FROM_REG(cr.pta,_reg1,_reg2);;	\
+	SAVE_FROM_REG(cr.itv,_reg1,_reg2);;	\
+	SAVE_FROM_REG(cr.pmv,_reg1,_reg2);;	\
+	SAVE_FROM_REG(cr.cmcv,_reg1,_reg2);;	\
+	SAVE_FROM_REG(cr.lrr0,_reg1,_reg2);;	\
+	SAVE_FROM_REG(cr.lrr1,_reg1,_reg2);;	\
+	st8 [_reg1]=r4,0x08;;	\
+	st8 [_reg1]=r5,0x08;;	\
+	st8 [_reg1]=r6,0x08;;	\
+	st8 [_reg1]=r7,0x08;;	\
+	st8 [_reg1]=_pred,0x08;;	\
+	SAVE_FROM_REG(ar.lc, _reg1, _reg2);;	\
+	stf.spill.nta [_reg1]=f2,16;;	\
+	stf.spill.nta [_reg1]=f3,16;;	\
+	stf.spill.nta [_reg1]=f4,16;;	\
+	stf.spill.nta [_reg1]=f5,16;;	\
+	stf.spill.nta [_reg1]=f16,16;;	\
+	stf.spill.nta [_reg1]=f17,16;;	\
+	stf.spill.nta [_reg1]=f18,16;;	\
+	stf.spill.nta [_reg1]=f19,16;;	\
+	stf.spill.nta [_reg1]=f20,16;;	\
+	stf.spill.nta [_reg1]=f21,16;;	\
+	stf.spill.nta [_reg1]=f22,16;;	\
+	stf.spill.nta [_reg1]=f23,16;;	\
+	stf.spill.nta [_reg1]=f24,16;;	\
+	stf.spill.nta [_reg1]=f25,16;;	\
+	stf.spill.nta [_reg1]=f26,16;;	\
+	stf.spill.nta [_reg1]=f27,16;;	\
+	stf.spill.nta [_reg1]=f28,16;;	\
+	stf.spill.nta [_reg1]=f29,16;;	\
+	stf.spill.nta [_reg1]=f30,16;;	\
+	stf.spill.nta [_reg1]=f31,16;;
+
+#else
+#define SAL_TO_OS_BOOT_HANDOFF_STATE_SAVE(a1,a2)
+#define SAVE_REGION_REGS(_tmp, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7)
+#define STORE_REGION_REGS(ptr, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7)
+#endif
+
+#define SET_ONE_RR(num, pgsize, _tmp1, _tmp2, vhpt) \
+	movl _tmp1=(num << 61);;	\
+	mov _tmp2=((ia64_rid(IA64_REGION_ID_KERNEL, (num<<61)) << 8) | (pgsize << 2) | vhpt);;	\
+	mov rr[_tmp1]=_tmp2

 	.section __special_page_section,"ax"

@@ -64,6 +194,12 @@ start_ap:
 	srlz.i
 	;;
 	/*
+	 * Save the region registers, predicate before they get clobbered
+	 */
+	SAVE_REGION_REGS(r2, r8,r9,r10,r11,r12,r13,r14,r15);
+	mov r25=pr;;
+
+	/*
 	 * Initialize kernel region registers:
 	 *	rr[0]: VHPT enabled, page size = PAGE_SHIFT
 	 *	rr[1]: VHPT enabled, page size = PAGE_SHIFT
@@ -76,32 +212,14 @@ start_ap:
 	 * We initialize all of them to prevent inadvertently assuming
 	 * something about the state of address translation early in boot.
 	 */
-	mov r6=((ia64_rid(IA64_REGION_ID_KERNEL, (0<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
-	movl r7=(0<<61)
-	mov r8=((ia64_rid(IA64_REGION_ID_KERNEL, (1<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
-	movl r9=(1<<61)
-	mov r10=((ia64_rid(IA64_REGION_ID_KERNEL, (2<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
-	movl r11=(2<<61)
-	mov r12=((ia64_rid(IA64_REGION_ID_KERNEL, (3<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
-	movl r13=(3<<61)
-	mov r14=((ia64_rid(IA64_REGION_ID_KERNEL, (4<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
-	movl r15=(4<<61)
-	mov r16=((ia64_rid(IA64_REGION_ID_KERNEL, (5<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
-	movl r17=(5<<61)
-	mov r18=((ia64_rid(IA64_REGION_ID_KERNEL, (6<<61)) << 8) | (IA64_GRANULE_SHIFT << 2))
-	movl r19=(6<<61)
-	mov r20=((ia64_rid(IA64_REGION_ID_KERNEL, (7<<61)) << 8) | (IA64_GRANULE_SHIFT << 2))
-	movl r21=(7<<61)
-	;;
-	mov rr[r7]=r6
-	mov rr[r9]=r8
-	mov rr[r11]=r10
-	mov rr[r13]=r12
-	mov rr[r15]=r14
-	mov rr[r17]=r16
-	mov rr[r19]=r18
-	mov rr[r21]=r20
-	;;
+	SET_ONE_RR(0, PAGE_SHIFT, r2, r16, 1);;
+	SET_ONE_RR(1, PAGE_SHIFT, r2, r16, 1);;
+	SET_ONE_RR(2, PAGE_SHIFT, r2, r16, 1);;
+	SET_ONE_RR(3, PAGE_SHIFT, r2, r16, 1);;
+	SET_ONE_RR(4, PAGE_SHIFT, r2, r16, 1);;
+	SET_ONE_RR(5, PAGE_SHIFT, r2, r16, 1);;
+	SET_ONE_RR(6, IA64_GRANULE_SHIFT, r2, r16, 0);;
+	SET_ONE_RR(7, IA64_GRANULE_SHIFT, r2, r16, 0);;
 	/*
 	 * Now pin mappings into the TLB for kernel text and data
 	 */
@@ -142,6 +260,13 @@ start_ap:
 	;;
 1:	// now we are in virtual mode

+	movl r2=sal_state_for_booting_cpu;;
+	ld8 r16=[r2];;
+
+	STORE_REGION_REGS(r16, r8,r9,r10,r11,r12,r13,r14,r15);
+	SAL_TO_OS_BOOT_HANDOFF_STATE_SAVE(r16,r17,r25)
+	;;
+
 	// set IVT entry point---can't access I/O ports without it
 	movl r3=ia64_ivt
 	;;
@@ -211,12 +336,13 @@ start_ap:
 	mov IA64_KR(CURRENT_STACK)=r16
 	mov r13=r2
 	/*
-	 * Reserve space at the top of the stack for "struct pt_regs". Kernel threads
-	 * don't store interesting values in that structure, but the space still needs
-	 * to be there because time-critical stuff such as the context switching can
-	 * be implemented more efficiently (for example, __switch_to()
+	 * Reserve space at the top of the stack for "struct pt_regs". Kernel
+	 * threads don't store interesting values in that structure, but the space
+	 * still needs to be there because time-critical stuff such as the context
+	 * switching can be implemented more efficiently (for example, __switch_to()
 	 * always sets the psr.dfh bit of the task it is switching to).
 	 */
+
 	addl r12=IA64_STK_OFFSET-IA64_PT_REGS_SIZE-16,r2
 	addl r2=IA64_RBS_OFFSET,r2	// initialize the RSE
 	mov ar.rsc=0	// place RSE in enforced lazy mode
@@ -993,4 +1119,98 @@ END(ia64_spinlock_contention)

 #endif

+#ifdef CONFIG_HOTPLUG_CPU
+GLOBAL_ENTRY(ia64_jump_to_sal)
+	alloc r16=ar.pfs,1,0,0,0;;
+	rsm psr.i | psr.ic
+{
+	flushrs
+	srlz.i
+}
+	tpa r25=in0
+	movl r18=tlb_purge_done;;
+	DATA_VA_TO_PA(r18);;
+	mov b1=r18	// Return location
+	movl r18=ia64_do_tlb_purge;;
+	DATA_VA_TO_PA(r18);;
+	mov b2=r18	// doing tlb_flush work
+	mov ar.rsc=0	// Put RSE in enforced lazy, LE mode
+	movl r17=1f;;
+	DATA_VA_TO_PA(r17);;
+	mov cr.iip=r17
+	movl r16=SAL_PSR_BITS_TO_SET;;
+	mov cr.ipsr=r16
+	mov cr.ifs=r0;;
+	rfi;;
+1:
+	/*
+	 * Invalidate all TLB data/inst
+	 */
+	br.sptk.many b2;;	// jump to tlb purge code
+
+tlb_purge_done:
+	RESTORE_REGION_REGS(r25, r17,r18,r19);;
+	RESTORE_REG(b0, r25, r17);;
+	RESTORE_REG(b1, r25, r17);;
+	RESTORE_REG(b2, r25, r17);;
+	RESTORE_REG(b3, r25, r17);;
+	RESTORE_REG(b4, r25, r17);;
+	RESTORE_REG(b5, r25, r17);;
+	ld8 r1=[r25],0x08;;
+	ld8 r12=[r25],0x08;;
+	ld8 r13=[r25],0x08;;
+	RESTORE_REG(ar.fpsr, r25, r17);;
+	RESTORE_REG(ar.pfs, r25, r17);;
+	RESTORE_REG(ar.rnat, r25, r17);;
+	RESTORE_REG(ar.unat, r25, r17);;
+	RESTORE_REG(ar.bspstore, r25, r17);;
+	RESTORE_REG(cr.dcr, r25, r17);;
+	RESTORE_REG(cr.iva, r25, r17);;
+	RESTORE_REG(cr.pta, r25, r17);;
+	RESTORE_REG(cr.itv, r25, r17);;
+	RESTORE_REG(cr.pmv, r25, r17);;
+	RESTORE_REG(cr.cmcv, r25, r17);;
+	RESTORE_REG(cr.lrr0, r25, r17);;
+	RESTORE_REG(cr.lrr1, r25, r17);;
+	ld8 r4=[r25],0x08;;
+	ld8 r5=[r25],0x08;;
+	ld8 r6=[r25],0x08;;
+	ld8 r7=[r25],0x08;;
+	ld8 r17=[r25],0x08;;
+	mov pr=r17,-1;;
+	RESTORE_REG(ar.lc, r25, r17);;
+	/*
+	 * Now Restore floating point regs
+	 */
+	ldf.fill.nta f2=[r25],16;;
+	ldf.fill.nta f3=[r25],16;;
+	ldf.fill.nta f4=[r25],16;;
+	ldf.fill.nta f5=[r25],16;;
+	ldf.fill.nta f16=[r25],16;;
+	ldf.fill.nta f17=[r25],16;;
+	ldf.fill.nta f18=[r25],16;;
+	ldf.fill.nta f19=[r25],16;;
+	ldf.fill.nta f20=[r25],16;;
+	ldf.fill.nta f21=[r25],16;;
+	ldf.fill.nta f22=[r25],16;;
+	ldf.fill.nta f23=[r25],16;;
+	ldf.fill.nta f24=[r25],16;;
+	ldf.fill.nta f25=[r25],16;;
+	ldf.fill.nta f26=[r25],16;;
+	ldf.fill.nta f27=[r25],16;;
+	ldf.fill.nta f28=[r25],16;;
+	ldf.fill.nta f29=[r25],16;;
+	ldf.fill.nta f30=[r25],16;;
+	ldf.fill.nta f31=[r25],16;;
+
+	/*
+	 * Now that we have done all the register restores
+	 * we are now ready for the big DIVE to SAL Land
+	 */
+	ssm psr.ic;;
+	srlz.d;;
+	br.ret.sptk.many b0;;
+END(ia64_jump_to_sal)
+#endif /* CONFIG_HOTPLUG_CPU */
+
 #endif /* CONFIG_SMP */
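The save area that start_ap fills above (and that ia64_jump_to_sal later
drains) is a per-cpu struct sal_to_os_boot, declared in smpboot.c further
down. Its definition lives in include/asm-ia64/sal.h, which is outside this
diffstat, so the following sketch is inferred purely from the store/load
order in the assembly; field names are illustrative, not the real header.

	/* Sketch only: layout inferred from the save/restore order above. */
	struct sal_to_os_boot {
		unsigned long rr[8];		/* region registers 0..7 */
		unsigned long br[6];		/* b0..b5 */
		unsigned long gr1, gr12, gr13;	/* r1 (gp), r12 (sp), r13 (current) */
		unsigned long fpsr, pfs, rnat, unat, bspstore;	/* ar.* */
		unsigned long dcr, iva, pta, itv, pmv, cmcv, lrr0, lrr1; /* cr.* */
		unsigned long gr4, gr5, gr6, gr7;	/* preserved r4..r7 */
		unsigned long pr;		/* predicate registers */
		unsigned long lc;		/* ar.lc */
		struct ia64_fpreg fp[20];	/* f2..f5, f16..f31, 16 bytes each */
	};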
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index cf3f8014f9ad..ef3fd7265b67 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -110,46 +110,19 @@
 	.global ia64_os_mca_dispatch_end
 	.global ia64_sal_to_os_handoff_state
 	.global ia64_os_to_sal_handoff_state
+	.global ia64_do_tlb_purge

 	.text
 	.align 16

-ia64_os_mca_dispatch:
-
-	// Serialize all MCA processing
-	mov r3=1;;
-	LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);;
-ia64_os_mca_spin:
-	xchg8 r4=[r2],r3;;
-	cmp.ne p6,p0=r4,r0
-(p6)	br ia64_os_mca_spin
-
-	// Save the SAL to OS MCA handoff state as defined
-	// by SAL SPEC 3.0
-	// NOTE : The order in which the state gets saved
-	//	  is dependent on the way the C-structure
-	//	  for ia64_mca_sal_to_os_state_t has been
-	//	  defined in include/asm/mca.h
-	SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
-	;;
-
-	// LOG PROCESSOR STATE INFO FROM HERE ON..
-begin_os_mca_dump:
-	br ia64_os_mca_proc_state_dump;;
-
-ia64_os_mca_done_dump:
-
-	LOAD_PHYSICAL(p0,r16,ia64_sal_to_os_handoff_state+56)
-	;;
-	ld8 r18=[r16]	// Get processor state parameter on existing PALE_CHECK.
-	;;
-	tbit.nz p6,p7=r18,60
-(p7)	br.spnt done_tlb_purge_and_reload
-
-	// The following code purges TC and TR entries. Then reload all TC entries.
-	// Purge percpu data TC entries.
-begin_tlb_purge_and_reload:
+/*
+ * Just the TLB purge part is moved to a separate function
+ * so we can re-use the code for cpu hotplug code as well
+ * Caller should now setup b1, so we can branch once the
+ * tlb flush is complete.
+ */

+ia64_do_tlb_purge:
 #define O(member)	IA64_CPUINFO_##member##_OFFSET

 GET_THIS_PADDR(r2, cpu_info)	// load phys addr of cpu_info into r2
@@ -230,6 +203,51 @@ begin_tlb_purge_and_reload:
 	;;
 	srlz.i
 	;;
+	// Now branch away to caller.
+	br.sptk.many b1
+	;;
+
+ia64_os_mca_dispatch:
+
+	// Serialize all MCA processing
+	mov r3=1;;
+	LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);;
+ia64_os_mca_spin:
+	xchg8 r4=[r2],r3;;
+	cmp.ne p6,p0=r4,r0
+(p6)	br ia64_os_mca_spin
+
+	// Save the SAL to OS MCA handoff state as defined
+	// by SAL SPEC 3.0
+	// NOTE : The order in which the state gets saved
+	//	  is dependent on the way the C-structure
+	//	  for ia64_mca_sal_to_os_state_t has been
+	//	  defined in include/asm/mca.h
+	SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
+	;;
+
+	// LOG PROCESSOR STATE INFO FROM HERE ON..
+begin_os_mca_dump:
+	br ia64_os_mca_proc_state_dump;;
+
+ia64_os_mca_done_dump:
+
+	LOAD_PHYSICAL(p0,r16,ia64_sal_to_os_handoff_state+56)
+	;;
+	ld8 r18=[r16]	// Get processor state parameter on existing PALE_CHECK.
+	;;
+	tbit.nz p6,p7=r18,60
+(p7)	br.spnt done_tlb_purge_and_reload
+
+	// The following code purges TC and TR entries. Then reload all TC entries.
+	// Purge percpu data TC entries.
+begin_tlb_purge_and_reload:
+	movl r18=ia64_reload_tr;;
+	LOAD_PHYSICAL(p0,r18,ia64_reload_tr);;
+	mov b1=r18;;
+	br.sptk.many ia64_do_tlb_purge;;
+
+ia64_reload_tr:
 	// Finally reload the TR registers.
 	// 1. Reload DTR/ITR registers for kernel.
 	mov r18=KERNEL_TR_PAGE_SHIFT<<2
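The refactor above turns the TLB purge into a shared subroutine whose return
address travels in branch register b1 rather than on a stack (there is none
in this physical-mode path). A loose C analogy of the convention, purely
illustrative:

	/* Loose C analogy of the b1 calling convention; illustrative only. */
	typedef void (*tlb_purge_cont_t)(void);

	static void ia64_do_tlb_purge_in_c(tlb_purge_cont_t cont)	/* cont ~ b1 */
	{
		/* ... purge TC and TR entries, as the assembly does ... */
		cont();		/* br.sptk.many b1 */
	}

The MCA path continues at ia64_reload_tr; the hotplug path continues at
tlb_purge_done inside ia64_jump_to_sal.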
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 91293388dd29..7c43aea5f7f7 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -3,6 +3,7 @@
  *
  * Copyright (C) 1998-2003 Hewlett-Packard Co
  *	David Mosberger-Tang <davidm@hpl.hp.com>
+ * 04/11/17 Ashok Raj	<ashok.raj@intel.com> Added CPU Hotplug Support
  */
 #define __KERNEL_SYSCALLS__	/* see <asm/unistd.h> */
 #include <linux/config.h>
@@ -200,27 +201,20 @@ default_idle (void)
 static inline void play_dead(void)
 {
 	extern void ia64_cpu_local_tick (void);
+	unsigned int this_cpu = smp_processor_id();
+
 	/* Ack it */
 	__get_cpu_var(cpu_state) = CPU_DEAD;

-	/* We shouldn't have to disable interrupts while dead, but
-	 * some interrupts just don't seem to go away, and this makes
-	 * it "work" for testing purposes. */
 	max_xtp();
 	local_irq_disable();
-	/* Death loop */
-	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
-		cpu_relax();
-
+	idle_task_exit();
+	ia64_jump_to_sal(&sal_boot_rendez_state[this_cpu]);
 	/*
-	 * Enable timer interrupts from now on
-	 * Not required if we put processor in SAL_BOOT_RENDEZ mode.
+	 * The above is a point of no-return, the processor is
+	 * expected to be in SAL loop now.
 	 */
-	local_flush_tlb_all();
-	cpu_set(smp_processor_id(), cpu_online_map);
-	wmb();
-	ia64_cpu_local_tick ();
-	local_irq_enable();
+	BUG();
 }
 #else
 static inline void play_dead(void)
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 5318f0cbfc26..ca1536db3394 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -9,6 +9,7 @@
  * 02/07/31 David Mosberger <davidm@hpl.hp.com>	Switch over to hotplug-CPU boot-sequence.
  *						smp_boot_cpus()/smp_commence() is replaced by
  *						smp_prepare_cpus()/__cpu_up()/smp_cpus_done().
+ * 04/06/21 Ashok Raj	<ashok.raj@intel.com> Added CPU Hotplug Support
  */
 #include <linux/config.h>

@@ -58,6 +59,37 @@
 #define Dprintk(x...)
 #endif

+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * Store all idle threads, this can be reused instead of creating
+ * a new thread. Also avoids complicated thread destroy functionality
+ * for idle threads.
+ */
+struct task_struct *idle_thread_array[NR_CPUS];
+
+/*
+ * Global array allocated for NR_CPUS at boot time
+ */
+struct sal_to_os_boot sal_boot_rendez_state[NR_CPUS];
+
+/*
+ * start_ap in head.S uses this to store current booting cpu
+ * info.
+ */
+struct sal_to_os_boot *sal_state_for_booting_cpu = &sal_boot_rendez_state[0];
+
+#define set_brendez_area(x) (sal_state_for_booting_cpu = &sal_boot_rendez_state[(x)]);
+
+#define get_idle_for_cpu(x)	(idle_thread_array[(x)])
+#define set_idle_for_cpu(x,p)	(idle_thread_array[(x)] = (p))
+
+#else
+
+#define get_idle_for_cpu(x)	(NULL)
+#define set_idle_for_cpu(x,p)
+#define set_brendez_area(x)
+#endif
+

 /*
  * ITC synchronization related stuff:
@@ -345,7 +377,6 @@ start_secondary (void *unused)
 {
 	/* Early console may use I/O ports */
 	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
-
 	Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id());
 	efi_map_pal_code();
 	cpu_init();
@@ -384,6 +415,13 @@ do_boot_cpu (int sapicid, int cpu)
 		.done	= COMPLETION_INITIALIZER(c_idle.done),
 	};
 	DECLARE_WORK(work, do_fork_idle, &c_idle);
+
+	c_idle.idle = get_idle_for_cpu(cpu);
+	if (c_idle.idle) {
+		init_idle(c_idle.idle, cpu);
+		goto do_rest;
+	}
+
 	/*
 	 * We can't use kernel_thread since we must avoid to reschedule the child.
 	 */
@@ -396,10 +434,15 @@ do_boot_cpu (int sapicid, int cpu)

 	if (IS_ERR(c_idle.idle))
 		panic("failed fork for CPU %d", cpu);
+
+	set_idle_for_cpu(cpu, c_idle.idle);
+
+do_rest:
 	task_for_booting_cpu = c_idle.idle;

 	Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid);

+	set_brendez_area(cpu);
 	platform_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0);

 	/*
@@ -555,16 +598,6 @@ void __devinit smp_prepare_boot_cpu(void)
 #ifdef CONFIG_HOTPLUG_CPU
 extern void fixup_irqs(void);
 /* must be called with cpucontrol mutex held */
-static int __devinit cpu_enable(unsigned int cpu)
-{
-	per_cpu(cpu_state,cpu) = CPU_UP_PREPARE;
-	wmb();
-
-	while (!cpu_online(cpu))
-		cpu_relax();
-	return 0;
-}
-
 int __cpu_disable(void)
 {
 	int cpu = smp_processor_id();
@@ -577,7 +610,7 @@ int __cpu_disable(void)

 	fixup_irqs();
 	local_flush_tlb_all();
-	printk ("Disabled cpu %u\n", smp_processor_id());
+	cpu_clear(cpu, cpu_callin_map);
 	return 0;
 }

@@ -589,12 +622,7 @@ void __cpu_die(unsigned int cpu)
 		/* They ack this in play_dead by setting CPU_DEAD */
 		if (per_cpu(cpu_state, cpu) == CPU_DEAD)
 		{
-			/*
-			 * TBD: Enable this when physical removal
-			 * or when we put the processor is put in
-			 * SAL_BOOT_RENDEZ mode
-			 * cpu_clear(cpu, cpu_callin_map);
-			 */
+			printk ("CPU %d is now offline\n", cpu);
 			return;
 		}
 		msleep(100);
@@ -602,11 +630,6 @@
 	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
 }
 #else /* !CONFIG_HOTPLUG_CPU */
-static int __devinit cpu_enable(unsigned int cpu)
-{
-	return 0;
-}
-
 int __cpu_disable(void)
 {
 	return -ENOSYS;
@@ -648,16 +671,12 @@ __cpu_up (unsigned int cpu)
 		return -EINVAL;

 	/*
-	 * Already booted.. just enable and get outa idle lool
+	 * Already booted cpu? not valid anymore since we dont
+	 * do idle loop tightspin anymore.
 	 */
 	if (cpu_isset(cpu, cpu_callin_map))
-	{
-		cpu_enable(cpu);
-		local_irq_enable();
-		while (!cpu_isset(cpu, cpu_online_map))
-			mb();
-		return 0;
-	}
+		return -EINVAL;
+
 	/* Processor goes to start_secondary(), sets online flag */
 	ret = do_boot_cpu(sapicid, cpu);
 	if (ret < 0)
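For completeness, a sketch of how __cpu_up() reads after this patch. The
sapicid lookup through ia64_cpu_to_sapicid[] happens outside the visible
hunks, so treat that detail as an assumption; the rest is condensed from
the hunk above.

	/* Sketch of __cpu_up() after this patch; error handling condensed. */
	int __cpu_up(unsigned int cpu)
	{
		int sapicid = ia64_cpu_to_sapicid[cpu];	/* assumption: set at boot */

		if (sapicid == -1)
			return -EINVAL;

		/*
		 * A cpu that already checked in has since gone back to SAL; it
		 * can no longer be resurrected by poking cpu_state, only rebooted
		 * through do_boot_cpu() like any other cpu.
		 */
		if (cpu_isset(cpu, cpu_callin_map))
			return -EINVAL;

		/*
		 * do_boot_cpu() reuses the saved idle thread if one exists,
		 * points sal_state_for_booting_cpu at this cpu's rendezvous area
		 * via set_brendez_area(), and sends the wakeup IPI; the AP
		 * re-enters through start_ap and start_secondary().
		 */
		return do_boot_cpu(sapicid, cpu);
	}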