author    | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2012-03-11 11:59:26 -0400
committer | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2012-03-11 11:59:28 -0400
commit    | 8b646bd759086f6090fe27acf414c0b5faa737f4 (patch)
tree      | 29475659031c57ccf2ca43899614ab5c6b1899a0 /arch/s390/kernel/smp.c
parent    | 7e180bd8020d213bb0de15c3606968f8a9262439 (diff)
[S390] rework smp code
Define struct pcpu and merge some of the NR_CPUS arrays into it, including
__cpu_logical_map, current_set and smp_cpu_state. Split the smp related
functions into those operating on physical cpus and those operating on a
logical cpu number. Make the functions for physical cpus use a pointer to
a struct pcpu. This confines the knowledge about cpu addresses to smp.c,
entry[64].S and swsusp_asm64.S, and allows the sigp.h header to be removed.
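
For reference, the new per-cpu bookkeeping that replaces those arrays is
condensed below from the patch body; the comments noting which old array
each field absorbs are added here for illustration only:

    struct pcpu {
            struct cpu cpu;
            struct task_struct *idle;       /* idle process for the cpu (was current_set[]) */
            struct _lowcore *lowcore;       /* lowcore page(s) for the cpu */
            unsigned long async_stack;      /* async stack for the cpu */
            unsigned long panic_stack;      /* panic stack for the cpu */
            unsigned long ec_mask;          /* bit mask for ec_xxx functions */
            int state;                      /* physical cpu state (was smp_cpu_state[]) */
            u32 status;                     /* last status received via sigp */
            u16 address;                    /* physical cpu address (was __cpu_logical_map[]) */
    };

    static struct pcpu pcpu_devices[NR_CPUS];       /* indexed by logical cpu number */
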
The PSW restart mechanism is used to start secondary cpus, to call a
function on an online cpu, to call a function on the ipl cpu, and for
the nmi signal. Replace the different assembler functions with a single
function, restart_int_handler. The new entry point calls a function whose
pointer is stored in the lowcore of the target cpu and can wait for the
source cpu to stop. This covers all existing use cases.
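
A condensed view of how that entry point is driven from C, taken from the
new pcpu_start_fn() in the patch body with comments added for illustration;
restart_int_handler itself lives in entry[64].S and is not shown.
pcpu_delegate() uses the same lowcore fields but stores the sending cpu's
address in restart_source so the handler first waits for that cpu to stop:

    static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
    {
            struct _lowcore *lc = pcpu->lowcore;

            lc->restart_stack = lc->kernel_stack;    /* stack the handler switches to */
            lc->restart_fn = (unsigned long) func;   /* picked up by restart_int_handler */
            lc->restart_data = (unsigned long) data; /* argument passed to func */
            lc->restart_source = -1UL;               /* no source cpu to wait for */
            pcpu_sigp_retry(pcpu, sigp_restart, 0);  /* trigger PSW restart on the target */
    }
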
Overall the code is now simpler, with ~380 fewer lines of code.
Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390/kernel/smp.c')
-rw-r--r-- | arch/s390/kernel/smp.c | 1083 |
1 file changed, 543 insertions, 540 deletions
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 2398ce6b15ae..6db8526a602d 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -1,23 +1,18 @@ | |||
1 | /* | 1 | /* |
2 | * arch/s390/kernel/smp.c | 2 | * SMP related functions |
3 | * | 3 | * |
4 | * Copyright IBM Corp. 1999, 2009 | 4 | * Copyright IBM Corp. 1999,2012 |
5 | * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), | 5 | * Author(s): Denis Joseph Barrow, |
6 | * Martin Schwidefsky (schwidefsky@de.ibm.com) | 6 | * Martin Schwidefsky <schwidefsky@de.ibm.com>, |
7 | * Heiko Carstens (heiko.carstens@de.ibm.com) | 7 | * Heiko Carstens <heiko.carstens@de.ibm.com>, |
8 | * | 8 | * |
9 | * based on other smp stuff by | 9 | * based on other smp stuff by |
10 | * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net> | 10 | * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net> |
11 | * (c) 1998 Ingo Molnar | 11 | * (c) 1998 Ingo Molnar |
12 | * | 12 | * |
13 | * We work with logical cpu numbering everywhere we can. The only | 13 | * The code outside of smp.c uses logical cpu numbers, only smp.c does |
14 | * functions using the real cpu address (got from STAP) are the sigp | 14 | * the translation of logical to physical cpu ids. All new code that |
15 | * functions. For all other functions we use the identity mapping. | 15 | * operates on physical cpu numbers needs to go into smp.c. |
16 | * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is | ||
17 | * used e.g. to find the idle task belonging to a logical cpu. Every array | ||
18 | * in the kernel is sorted by the logical cpu number and not by the physical | ||
19 | * one which is causing all the confusion with __cpu_logical_map and | ||
20 | * cpu_number_map in other architectures. | ||
21 | */ | 16 | */ |
22 | 17 | ||
23 | #define KMSG_COMPONENT "cpu" | 18 | #define KMSG_COMPONENT "cpu" |
@@ -31,140 +26,381 @@ | |||
31 | #include <linux/spinlock.h> | 26 | #include <linux/spinlock.h> |
32 | #include <linux/kernel_stat.h> | 27 | #include <linux/kernel_stat.h> |
33 | #include <linux/delay.h> | 28 | #include <linux/delay.h> |
34 | #include <linux/cache.h> | ||
35 | #include <linux/interrupt.h> | 29 | #include <linux/interrupt.h> |
36 | #include <linux/irqflags.h> | 30 | #include <linux/irqflags.h> |
37 | #include <linux/cpu.h> | 31 | #include <linux/cpu.h> |
38 | #include <linux/timex.h> | ||
39 | #include <linux/bootmem.h> | ||
40 | #include <linux/slab.h> | 32 | #include <linux/slab.h> |
41 | #include <linux/crash_dump.h> | 33 | #include <linux/crash_dump.h> |
42 | #include <asm/asm-offsets.h> | 34 | #include <asm/asm-offsets.h> |
43 | #include <asm/ipl.h> | 35 | #include <asm/ipl.h> |
44 | #include <asm/setup.h> | 36 | #include <asm/setup.h> |
45 | #include <asm/sigp.h> | ||
46 | #include <asm/pgalloc.h> | ||
47 | #include <asm/irq.h> | 37 | #include <asm/irq.h> |
48 | #include <asm/cpcmd.h> | ||
49 | #include <asm/tlbflush.h> | 38 | #include <asm/tlbflush.h> |
50 | #include <asm/timer.h> | 39 | #include <asm/timer.h> |
51 | #include <asm/lowcore.h> | 40 | #include <asm/lowcore.h> |
52 | #include <asm/sclp.h> | 41 | #include <asm/sclp.h> |
53 | #include <asm/cputime.h> | ||
54 | #include <asm/vdso.h> | 42 | #include <asm/vdso.h> |
55 | #include <asm/cpu.h> | ||
56 | #include "entry.h" | 43 | #include "entry.h" |
57 | 44 | ||
58 | /* logical cpu to cpu address */ | 45 | enum { |
59 | unsigned short __cpu_logical_map[NR_CPUS]; | 46 | sigp_sense = 1, |
47 | sigp_external_call = 2, | ||
48 | sigp_emergency_signal = 3, | ||
49 | sigp_start = 4, | ||
50 | sigp_stop = 5, | ||
51 | sigp_restart = 6, | ||
52 | sigp_stop_and_store_status = 9, | ||
53 | sigp_initial_cpu_reset = 11, | ||
54 | sigp_cpu_reset = 12, | ||
55 | sigp_set_prefix = 13, | ||
56 | sigp_store_status_at_address = 14, | ||
57 | sigp_store_extended_status_at_address = 15, | ||
58 | sigp_set_architecture = 18, | ||
59 | sigp_conditional_emergency_signal = 19, | ||
60 | sigp_sense_running = 21, | ||
61 | }; | ||
60 | 62 | ||
61 | static struct task_struct *current_set[NR_CPUS]; | 63 | enum { |
64 | sigp_order_code_accepted = 0, | ||
65 | sigp_status_stored = 1, | ||
66 | sigp_busy = 2, | ||
67 | sigp_not_operational = 3, | ||
68 | }; | ||
62 | 69 | ||
63 | static u8 smp_cpu_type; | 70 | enum { |
64 | static int smp_use_sigp_detection; | 71 | ec_schedule = 0, |
72 | ec_call_function, | ||
73 | ec_call_function_single, | ||
74 | ec_stop_cpu, | ||
75 | }; | ||
65 | 76 | ||
66 | enum s390_cpu_state { | 77 | enum { |
67 | CPU_STATE_STANDBY, | 78 | CPU_STATE_STANDBY, |
68 | CPU_STATE_CONFIGURED, | 79 | CPU_STATE_CONFIGURED, |
69 | }; | 80 | }; |
70 | 81 | ||
82 | struct pcpu { | ||
83 | struct cpu cpu; | ||
84 | struct task_struct *idle; /* idle process for the cpu */ | ||
85 | struct _lowcore *lowcore; /* lowcore page(s) for the cpu */ | ||
86 | unsigned long async_stack; /* async stack for the cpu */ | ||
87 | unsigned long panic_stack; /* panic stack for the cpu */ | ||
88 | unsigned long ec_mask; /* bit mask for ec_xxx functions */ | ||
89 | int state; /* physical cpu state */ | ||
90 | u32 status; /* last status received via sigp */ | ||
91 | u16 address; /* physical cpu address */ | ||
92 | }; | ||
93 | |||
94 | static u8 boot_cpu_type; | ||
95 | static u16 boot_cpu_address; | ||
96 | static struct pcpu pcpu_devices[NR_CPUS]; | ||
97 | |||
71 | DEFINE_MUTEX(smp_cpu_state_mutex); | 98 | DEFINE_MUTEX(smp_cpu_state_mutex); |
72 | static int smp_cpu_state[NR_CPUS]; | ||
73 | 99 | ||
74 | static DEFINE_PER_CPU(struct cpu, cpu_devices); | 100 | /* |
101 | * Signal processor helper functions. | ||
102 | */ | ||
103 | static inline int __pcpu_sigp(u16 addr, u8 order, u32 parm, u32 *status) | ||
104 | { | ||
105 | register unsigned int reg1 asm ("1") = parm; | ||
106 | int cc; | ||
75 | 107 | ||
76 | static void smp_ext_bitcall(int, int); | 108 | asm volatile( |
109 | " sigp %1,%2,0(%3)\n" | ||
110 | " ipm %0\n" | ||
111 | " srl %0,28\n" | ||
112 | : "=d" (cc), "+d" (reg1) : "d" (addr), "a" (order) : "cc"); | ||
113 | if (status && cc == 1) | ||
114 | *status = reg1; | ||
115 | return cc; | ||
116 | } | ||
77 | 117 | ||
78 | static int raw_cpu_stopped(int cpu) | 118 | static inline int __pcpu_sigp_relax(u16 addr, u8 order, u32 parm, u32 *status) |
79 | { | 119 | { |
80 | u32 status; | 120 | int cc; |
81 | 121 | ||
82 | switch (raw_sigp_ps(&status, 0, cpu, sigp_sense)) { | 122 | while (1) { |
83 | case sigp_status_stored: | 123 | cc = __pcpu_sigp(addr, order, parm, status); |
84 | /* Check for stopped and check stop state */ | 124 | if (cc != sigp_busy) |
85 | if (status & 0x50) | 125 | return cc; |
86 | return 1; | 126 | cpu_relax(); |
87 | break; | ||
88 | default: | ||
89 | break; | ||
90 | } | 127 | } |
91 | return 0; | ||
92 | } | 128 | } |
93 | 129 | ||
94 | static inline int cpu_stopped(int cpu) | 130 | static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm) |
95 | { | 131 | { |
96 | return raw_cpu_stopped(cpu_logical_map(cpu)); | 132 | int cc, retry; |
133 | |||
134 | for (retry = 0; ; retry++) { | ||
135 | cc = __pcpu_sigp(pcpu->address, order, parm, &pcpu->status); | ||
136 | if (cc != sigp_busy) | ||
137 | break; | ||
138 | if (retry >= 3) | ||
139 | udelay(10); | ||
140 | } | ||
141 | return cc; | ||
142 | } | ||
143 | |||
144 | static inline int pcpu_stopped(struct pcpu *pcpu) | ||
145 | { | ||
146 | if (__pcpu_sigp(pcpu->address, sigp_sense, | ||
147 | 0, &pcpu->status) != sigp_status_stored) | ||
148 | return 0; | ||
149 | /* Check for stopped and check stop state */ | ||
150 | return !!(pcpu->status & 0x50); | ||
151 | } | ||
152 | |||
153 | static inline int pcpu_running(struct pcpu *pcpu) | ||
154 | { | ||
155 | if (__pcpu_sigp(pcpu->address, sigp_sense_running, | ||
156 | 0, &pcpu->status) != sigp_status_stored) | ||
157 | return 1; | ||
158 | /* Check for running status */ | ||
159 | return !(pcpu->status & 0x400); | ||
97 | } | 160 | } |
98 | 161 | ||
99 | /* | 162 | /* |
100 | * Ensure that PSW restart is done on an online CPU | 163 | * Find struct pcpu by cpu address. |
101 | */ | 164 | */ |
102 | void smp_restart_with_online_cpu(void) | 165 | static struct pcpu *pcpu_find_address(const struct cpumask *mask, int address) |
103 | { | 166 | { |
104 | int cpu; | 167 | int cpu; |
105 | 168 | ||
106 | for_each_online_cpu(cpu) { | 169 | for_each_cpu(cpu, mask) |
107 | if (stap() == __cpu_logical_map[cpu]) { | 170 | if (pcpu_devices[cpu].address == address) |
108 | /* We are online: Enable DAT again and return */ | 171 | return pcpu_devices + cpu; |
109 | __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT); | 172 | return NULL; |
110 | return; | 173 | } |
111 | } | 174 | |
175 | static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit) | ||
176 | { | ||
177 | int order; | ||
178 | |||
179 | set_bit(ec_bit, &pcpu->ec_mask); | ||
180 | order = pcpu_running(pcpu) ? | ||
181 | sigp_external_call : sigp_emergency_signal; | ||
182 | pcpu_sigp_retry(pcpu, order, 0); | ||
183 | } | ||
184 | |||
185 | static int __cpuinit pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) | ||
186 | { | ||
187 | struct _lowcore *lc; | ||
188 | |||
189 | if (pcpu != &pcpu_devices[0]) { | ||
190 | pcpu->lowcore = (struct _lowcore *) | ||
191 | __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER); | ||
192 | pcpu->async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER); | ||
193 | pcpu->panic_stack = __get_free_page(GFP_KERNEL); | ||
194 | if (!pcpu->lowcore || !pcpu->panic_stack || !pcpu->async_stack) | ||
195 | goto out; | ||
112 | } | 196 | } |
113 | /* We are not online: Do PSW restart on an online CPU */ | 197 | lc = pcpu->lowcore; |
114 | while (sigp(cpu, sigp_restart) == sigp_busy) | 198 | memcpy(lc, &S390_lowcore, 512); |
115 | cpu_relax(); | 199 | memset((char *) lc + 512, 0, sizeof(*lc) - 512); |
116 | /* And stop ourself */ | 200 | lc->async_stack = pcpu->async_stack + ASYNC_SIZE; |
117 | while (raw_sigp(stap(), sigp_stop) == sigp_busy) | 201 | lc->panic_stack = pcpu->panic_stack + PAGE_SIZE; |
118 | cpu_relax(); | 202 | lc->cpu_nr = cpu; |
119 | for (;;); | 203 | #ifndef CONFIG_64BIT |
204 | if (MACHINE_HAS_IEEE) { | ||
205 | lc->extended_save_area_addr = get_zeroed_page(GFP_KERNEL); | ||
206 | if (!lc->extended_save_area_addr) | ||
207 | goto out; | ||
208 | } | ||
209 | #else | ||
210 | if (vdso_alloc_per_cpu(lc)) | ||
211 | goto out; | ||
212 | #endif | ||
213 | lowcore_ptr[cpu] = lc; | ||
214 | pcpu_sigp_retry(pcpu, sigp_set_prefix, (u32)(unsigned long) lc); | ||
215 | return 0; | ||
216 | out: | ||
217 | if (pcpu != &pcpu_devices[0]) { | ||
218 | free_page(pcpu->panic_stack); | ||
219 | free_pages(pcpu->async_stack, ASYNC_ORDER); | ||
220 | free_pages((unsigned long) pcpu->lowcore, LC_ORDER); | ||
221 | } | ||
222 | return -ENOMEM; | ||
120 | } | 223 | } |
121 | 224 | ||
122 | void smp_switch_to_ipl_cpu(void (*func)(void *), void *data) | 225 | static void pcpu_free_lowcore(struct pcpu *pcpu) |
123 | { | 226 | { |
124 | struct _lowcore *lc, *current_lc; | 227 | pcpu_sigp_retry(pcpu, sigp_set_prefix, 0); |
125 | struct stack_frame *sf; | 228 | lowcore_ptr[pcpu - pcpu_devices] = NULL; |
126 | struct pt_regs *regs; | 229 | #ifndef CONFIG_64BIT |
127 | unsigned long sp; | 230 | if (MACHINE_HAS_IEEE) { |
128 | 231 | struct _lowcore *lc = pcpu->lowcore; | |
129 | if (smp_processor_id() == 0) | 232 | |
130 | func(data); | 233 | free_page((unsigned long) lc->extended_save_area_addr); |
131 | __load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | | 234 | lc->extended_save_area_addr = 0; |
132 | PSW_MASK_EA | PSW_MASK_BA); | 235 | } |
133 | /* Disable lowcore protection */ | 236 | #else |
134 | __ctl_clear_bit(0, 28); | 237 | vdso_free_per_cpu(pcpu->lowcore); |
135 | current_lc = lowcore_ptr[smp_processor_id()]; | 238 | #endif |
136 | lc = lowcore_ptr[0]; | 239 | if (pcpu != &pcpu_devices[0]) { |
137 | if (!lc) | 240 | free_page(pcpu->panic_stack); |
138 | lc = current_lc; | 241 | free_pages(pcpu->async_stack, ASYNC_ORDER); |
139 | lc->restart_psw.mask = | 242 | free_pages((unsigned long) pcpu->lowcore, LC_ORDER); |
140 | PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA; | 243 | } |
141 | lc->restart_psw.addr = PSW_ADDR_AMODE | (unsigned long) smp_restart_cpu; | 244 | } |
142 | if (!cpu_online(0)) | 245 | |
143 | smp_switch_to_cpu(func, data, 0, stap(), __cpu_logical_map[0]); | 246 | static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu) |
144 | while (sigp(0, sigp_stop_and_store_status) == sigp_busy) | 247 | { |
145 | cpu_relax(); | 248 | struct _lowcore *lc = pcpu->lowcore; |
146 | sp = lc->panic_stack; | 249 | |
147 | sp -= sizeof(struct pt_regs); | 250 | atomic_inc(&init_mm.context.attach_count); |
148 | regs = (struct pt_regs *) sp; | 251 | lc->cpu_nr = cpu; |
149 | memcpy(®s->gprs, ¤t_lc->gpregs_save_area, sizeof(regs->gprs)); | 252 | lc->percpu_offset = __per_cpu_offset[cpu]; |
150 | regs->psw = current_lc->psw_save_area; | 253 | lc->kernel_asce = S390_lowcore.kernel_asce; |
151 | sp -= STACK_FRAME_OVERHEAD; | 254 | lc->machine_flags = S390_lowcore.machine_flags; |
152 | sf = (struct stack_frame *) sp; | 255 | lc->ftrace_func = S390_lowcore.ftrace_func; |
153 | sf->back_chain = 0; | 256 | lc->user_timer = lc->system_timer = lc->steal_timer = 0; |
154 | smp_switch_to_cpu(func, data, sp, stap(), __cpu_logical_map[0]); | 257 | __ctl_store(lc->cregs_save_area, 0, 15); |
258 | save_access_regs((unsigned int *) lc->access_regs_save_area); | ||
259 | memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list, | ||
260 | MAX_FACILITY_BIT/8); | ||
261 | } | ||
262 | |||
263 | static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk) | ||
264 | { | ||
265 | struct _lowcore *lc = pcpu->lowcore; | ||
266 | struct thread_info *ti = task_thread_info(tsk); | ||
267 | |||
268 | lc->kernel_stack = (unsigned long) task_stack_page(tsk) + THREAD_SIZE; | ||
269 | lc->thread_info = (unsigned long) task_thread_info(tsk); | ||
270 | lc->current_task = (unsigned long) tsk; | ||
271 | lc->user_timer = ti->user_timer; | ||
272 | lc->system_timer = ti->system_timer; | ||
273 | lc->steal_timer = 0; | ||
274 | } | ||
275 | |||
276 | static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data) | ||
277 | { | ||
278 | struct _lowcore *lc = pcpu->lowcore; | ||
279 | |||
280 | lc->restart_stack = lc->kernel_stack; | ||
281 | lc->restart_fn = (unsigned long) func; | ||
282 | lc->restart_data = (unsigned long) data; | ||
283 | lc->restart_source = -1UL; | ||
284 | pcpu_sigp_retry(pcpu, sigp_restart, 0); | ||
285 | } | ||
286 | |||
287 | /* | ||
288 | * Call function via PSW restart on pcpu and stop the current cpu. | ||
289 | */ | ||
290 | static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *), | ||
291 | void *data, unsigned long stack) | ||
292 | { | ||
293 | struct _lowcore *lc = pcpu->lowcore; | ||
294 | unsigned short this_cpu; | ||
295 | |||
296 | __load_psw_mask(psw_kernel_bits); | ||
297 | this_cpu = stap(); | ||
298 | if (pcpu->address == this_cpu) | ||
299 | func(data); /* should not return */ | ||
300 | /* Stop target cpu (if func returns this stops the current cpu). */ | ||
301 | pcpu_sigp_retry(pcpu, sigp_stop, 0); | ||
302 | /* Restart func on the target cpu and stop the current cpu. */ | ||
303 | lc->restart_stack = stack; | ||
304 | lc->restart_fn = (unsigned long) func; | ||
305 | lc->restart_data = (unsigned long) data; | ||
306 | lc->restart_source = (unsigned long) this_cpu; | ||
307 | asm volatile( | ||
308 | "0: sigp 0,%0,6 # sigp restart to target cpu\n" | ||
309 | " brc 2,0b # busy, try again\n" | ||
310 | "1: sigp 0,%1,5 # sigp stop to current cpu\n" | ||
311 | " brc 2,1b # busy, try again\n" | ||
312 | : : "d" (pcpu->address), "d" (this_cpu) : "0", "1", "cc"); | ||
313 | for (;;) ; | ||
314 | } | ||
315 | |||
316 | /* | ||
317 | * Call function on an online CPU. | ||
318 | */ | ||
319 | void smp_call_online_cpu(void (*func)(void *), void *data) | ||
320 | { | ||
321 | struct pcpu *pcpu; | ||
322 | |||
323 | /* Use the current cpu if it is online. */ | ||
324 | pcpu = pcpu_find_address(cpu_online_mask, stap()); | ||
325 | if (!pcpu) | ||
326 | /* Use the first online cpu. */ | ||
327 | pcpu = pcpu_devices + cpumask_first(cpu_online_mask); | ||
328 | pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack); | ||
329 | } | ||
330 | |||
331 | /* | ||
332 | * Call function on the ipl CPU. | ||
333 | */ | ||
334 | void smp_call_ipl_cpu(void (*func)(void *), void *data) | ||
335 | { | ||
336 | pcpu_delegate(&pcpu_devices[0], func, data, pcpu_devices->panic_stack); | ||
337 | } | ||
338 | |||
339 | int smp_find_processor_id(u16 address) | ||
340 | { | ||
341 | int cpu; | ||
342 | |||
343 | for_each_present_cpu(cpu) | ||
344 | if (pcpu_devices[cpu].address == address) | ||
345 | return cpu; | ||
346 | return -1; | ||
155 | } | 347 | } |
156 | 348 | ||
157 | static void smp_stop_cpu(void) | 349 | int smp_vcpu_scheduled(int cpu) |
158 | { | 350 | { |
159 | while (sigp(smp_processor_id(), sigp_stop) == sigp_busy) | 351 | return pcpu_running(pcpu_devices + cpu); |
352 | } | ||
353 | |||
354 | void smp_yield(void) | ||
355 | { | ||
356 | if (MACHINE_HAS_DIAG44) | ||
357 | asm volatile("diag 0,0,0x44"); | ||
358 | } | ||
359 | |||
360 | void smp_yield_cpu(int cpu) | ||
361 | { | ||
362 | if (MACHINE_HAS_DIAG9C) | ||
363 | asm volatile("diag %0,0,0x9c" | ||
364 | : : "d" (pcpu_devices[cpu].address)); | ||
365 | else if (MACHINE_HAS_DIAG44) | ||
366 | asm volatile("diag 0,0,0x44"); | ||
367 | } | ||
368 | |||
369 | /* | ||
370 | * Send cpus emergency shutdown signal. This gives the cpus the | ||
371 | * opportunity to complete outstanding interrupts. | ||
372 | */ | ||
373 | void smp_emergency_stop(cpumask_t *cpumask) | ||
374 | { | ||
375 | u64 end; | ||
376 | int cpu; | ||
377 | |||
378 | end = get_clock() + (1000000UL << 12); | ||
379 | for_each_cpu(cpu, cpumask) { | ||
380 | struct pcpu *pcpu = pcpu_devices + cpu; | ||
381 | set_bit(ec_stop_cpu, &pcpu->ec_mask); | ||
382 | while (__pcpu_sigp(pcpu->address, sigp_emergency_signal, | ||
383 | 0, NULL) == sigp_busy && | ||
384 | get_clock() < end) | ||
385 | cpu_relax(); | ||
386 | } | ||
387 | while (get_clock() < end) { | ||
388 | for_each_cpu(cpu, cpumask) | ||
389 | if (pcpu_stopped(pcpu_devices + cpu)) | ||
390 | cpumask_clear_cpu(cpu, cpumask); | ||
391 | if (cpumask_empty(cpumask)) | ||
392 | break; | ||
160 | cpu_relax(); | 393 | cpu_relax(); |
394 | } | ||
161 | } | 395 | } |
162 | 396 | ||
397 | /* | ||
398 | * Stop all cpus but the current one. | ||
399 | */ | ||
163 | void smp_send_stop(void) | 400 | void smp_send_stop(void) |
164 | { | 401 | { |
165 | cpumask_t cpumask; | 402 | cpumask_t cpumask; |
166 | int cpu; | 403 | int cpu; |
167 | u64 end; | ||
168 | 404 | ||
169 | /* Disable all interrupts/machine checks */ | 405 | /* Disable all interrupts/machine checks */ |
170 | __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT); | 406 | __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT); |
@@ -173,56 +409,46 @@ void smp_send_stop(void) | |||
173 | cpumask_copy(&cpumask, cpu_online_mask); | 409 | cpumask_copy(&cpumask, cpu_online_mask); |
174 | cpumask_clear_cpu(smp_processor_id(), &cpumask); | 410 | cpumask_clear_cpu(smp_processor_id(), &cpumask); |
175 | 411 | ||
176 | if (oops_in_progress) { | 412 | if (oops_in_progress) |
177 | /* | 413 | smp_emergency_stop(&cpumask); |
178 | * Give the other cpus the opportunity to complete | ||
179 | * outstanding interrupts before stopping them. | ||
180 | */ | ||
181 | end = get_clock() + (1000000UL << 12); | ||
182 | for_each_cpu(cpu, &cpumask) { | ||
183 | set_bit(ec_stop_cpu, (unsigned long *) | ||
184 | &lowcore_ptr[cpu]->ext_call_fast); | ||
185 | while (sigp(cpu, sigp_emergency_signal) == sigp_busy && | ||
186 | get_clock() < end) | ||
187 | cpu_relax(); | ||
188 | } | ||
189 | while (get_clock() < end) { | ||
190 | for_each_cpu(cpu, &cpumask) | ||
191 | if (cpu_stopped(cpu)) | ||
192 | cpumask_clear_cpu(cpu, &cpumask); | ||
193 | if (cpumask_empty(&cpumask)) | ||
194 | break; | ||
195 | cpu_relax(); | ||
196 | } | ||
197 | } | ||
198 | 414 | ||
199 | /* stop all processors */ | 415 | /* stop all processors */ |
200 | for_each_cpu(cpu, &cpumask) { | 416 | for_each_cpu(cpu, &cpumask) { |
201 | while (sigp(cpu, sigp_stop) == sigp_busy) | 417 | struct pcpu *pcpu = pcpu_devices + cpu; |
202 | cpu_relax(); | 418 | pcpu_sigp_retry(pcpu, sigp_stop, 0); |
203 | while (!cpu_stopped(cpu)) | 419 | while (!pcpu_stopped(pcpu)) |
204 | cpu_relax(); | 420 | cpu_relax(); |
205 | } | 421 | } |
206 | } | 422 | } |
207 | 423 | ||
208 | /* | 424 | /* |
425 | * Stop the current cpu. | ||
426 | */ | ||
427 | void smp_stop_cpu(void) | ||
428 | { | ||
429 | pcpu_sigp_retry(pcpu_devices + smp_processor_id(), sigp_stop, 0); | ||
430 | for (;;) ; | ||
431 | } | ||
432 | |||
433 | /* | ||
209 | * This is the main routine where commands issued by other | 434 | * This is the main routine where commands issued by other |
210 | * cpus are handled. | 435 | * cpus are handled. |
211 | */ | 436 | */ |
212 | |||
213 | static void do_ext_call_interrupt(unsigned int ext_int_code, | 437 | static void do_ext_call_interrupt(unsigned int ext_int_code, |
214 | unsigned int param32, unsigned long param64) | 438 | unsigned int param32, unsigned long param64) |
215 | { | 439 | { |
216 | unsigned long bits; | 440 | unsigned long bits; |
441 | int cpu; | ||
217 | 442 | ||
443 | cpu = smp_processor_id(); | ||
218 | if ((ext_int_code & 0xffff) == 0x1202) | 444 | if ((ext_int_code & 0xffff) == 0x1202) |
219 | kstat_cpu(smp_processor_id()).irqs[EXTINT_EXC]++; | 445 | kstat_cpu(cpu).irqs[EXTINT_EXC]++; |
220 | else | 446 | else |
221 | kstat_cpu(smp_processor_id()).irqs[EXTINT_EMS]++; | 447 | kstat_cpu(cpu).irqs[EXTINT_EMS]++; |
222 | /* | 448 | /* |
223 | * handle bit signal external calls | 449 | * handle bit signal external calls |
224 | */ | 450 | */ |
225 | bits = xchg(&S390_lowcore.ext_call_fast, 0); | 451 | bits = xchg(&pcpu_devices[cpu].ec_mask, 0); |
226 | 452 | ||
227 | if (test_bit(ec_stop_cpu, &bits)) | 453 | if (test_bit(ec_stop_cpu, &bits)) |
228 | smp_stop_cpu(); | 454 | smp_stop_cpu(); |
@@ -238,38 +464,17 @@ static void do_ext_call_interrupt(unsigned int ext_int_code, | |||
238 | 464 | ||
239 | } | 465 | } |
240 | 466 | ||
241 | /* | ||
242 | * Send an external call sigp to another cpu and return without waiting | ||
243 | * for its completion. | ||
244 | */ | ||
245 | static void smp_ext_bitcall(int cpu, int sig) | ||
246 | { | ||
247 | int order; | ||
248 | |||
249 | /* | ||
250 | * Set signaling bit in lowcore of target cpu and kick it | ||
251 | */ | ||
252 | set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast); | ||
253 | while (1) { | ||
254 | order = smp_vcpu_scheduled(cpu) ? | ||
255 | sigp_external_call : sigp_emergency_signal; | ||
256 | if (sigp(cpu, order) != sigp_busy) | ||
257 | break; | ||
258 | udelay(10); | ||
259 | } | ||
260 | } | ||
261 | |||
262 | void arch_send_call_function_ipi_mask(const struct cpumask *mask) | 467 | void arch_send_call_function_ipi_mask(const struct cpumask *mask) |
263 | { | 468 | { |
264 | int cpu; | 469 | int cpu; |
265 | 470 | ||
266 | for_each_cpu(cpu, mask) | 471 | for_each_cpu(cpu, mask) |
267 | smp_ext_bitcall(cpu, ec_call_function); | 472 | pcpu_ec_call(pcpu_devices + cpu, ec_call_function); |
268 | } | 473 | } |
269 | 474 | ||
270 | void arch_send_call_function_single_ipi(int cpu) | 475 | void arch_send_call_function_single_ipi(int cpu) |
271 | { | 476 | { |
272 | smp_ext_bitcall(cpu, ec_call_function_single); | 477 | pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single); |
273 | } | 478 | } |
274 | 479 | ||
275 | #ifndef CONFIG_64BIT | 480 | #ifndef CONFIG_64BIT |
@@ -295,15 +500,16 @@ EXPORT_SYMBOL(smp_ptlb_all); | |||
295 | */ | 500 | */ |
296 | void smp_send_reschedule(int cpu) | 501 | void smp_send_reschedule(int cpu) |
297 | { | 502 | { |
298 | smp_ext_bitcall(cpu, ec_schedule); | 503 | pcpu_ec_call(pcpu_devices + cpu, ec_schedule); |
299 | } | 504 | } |
300 | 505 | ||
301 | /* | 506 | /* |
302 | * parameter area for the set/clear control bit callbacks | 507 | * parameter area for the set/clear control bit callbacks |
303 | */ | 508 | */ |
304 | struct ec_creg_mask_parms { | 509 | struct ec_creg_mask_parms { |
305 | unsigned long orvals[16]; | 510 | unsigned long orval; |
306 | unsigned long andvals[16]; | 511 | unsigned long andval; |
512 | int cr; | ||
307 | }; | 513 | }; |
308 | 514 | ||
309 | /* | 515 | /* |
@@ -313,11 +519,9 @@ static void smp_ctl_bit_callback(void *info) | |||
313 | { | 519 | { |
314 | struct ec_creg_mask_parms *pp = info; | 520 | struct ec_creg_mask_parms *pp = info; |
315 | unsigned long cregs[16]; | 521 | unsigned long cregs[16]; |
316 | int i; | ||
317 | 522 | ||
318 | __ctl_store(cregs, 0, 15); | 523 | __ctl_store(cregs, 0, 15); |
319 | for (i = 0; i <= 15; i++) | 524 | cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval; |
320 | cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i]; | ||
321 | __ctl_load(cregs, 0, 15); | 525 | __ctl_load(cregs, 0, 15); |
322 | } | 526 | } |
323 | 527 | ||
@@ -326,11 +530,8 @@ static void smp_ctl_bit_callback(void *info) | |||
326 | */ | 530 | */ |
327 | void smp_ctl_set_bit(int cr, int bit) | 531 | void smp_ctl_set_bit(int cr, int bit) |
328 | { | 532 | { |
329 | struct ec_creg_mask_parms parms; | 533 | struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr }; |
330 | 534 | ||
331 | memset(&parms.orvals, 0, sizeof(parms.orvals)); | ||
332 | memset(&parms.andvals, 0xff, sizeof(parms.andvals)); | ||
333 | parms.orvals[cr] = 1UL << bit; | ||
334 | on_each_cpu(smp_ctl_bit_callback, &parms, 1); | 535 | on_each_cpu(smp_ctl_bit_callback, &parms, 1); |
335 | } | 536 | } |
336 | EXPORT_SYMBOL(smp_ctl_set_bit); | 537 | EXPORT_SYMBOL(smp_ctl_set_bit); |
@@ -340,216 +541,175 @@ EXPORT_SYMBOL(smp_ctl_set_bit); | |||
340 | */ | 541 | */ |
341 | void smp_ctl_clear_bit(int cr, int bit) | 542 | void smp_ctl_clear_bit(int cr, int bit) |
342 | { | 543 | { |
343 | struct ec_creg_mask_parms parms; | 544 | struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr }; |
344 | 545 | ||
345 | memset(&parms.orvals, 0, sizeof(parms.orvals)); | ||
346 | memset(&parms.andvals, 0xff, sizeof(parms.andvals)); | ||
347 | parms.andvals[cr] = ~(1UL << bit); | ||
348 | on_each_cpu(smp_ctl_bit_callback, &parms, 1); | 546 | on_each_cpu(smp_ctl_bit_callback, &parms, 1); |
349 | } | 547 | } |
350 | EXPORT_SYMBOL(smp_ctl_clear_bit); | 548 | EXPORT_SYMBOL(smp_ctl_clear_bit); |
351 | 549 | ||
352 | #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_CRASH_DUMP) | 550 | #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_CRASH_DUMP) |
353 | 551 | ||
354 | static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) | 552 | struct save_area *zfcpdump_save_areas[NR_CPUS + 1]; |
553 | EXPORT_SYMBOL_GPL(zfcpdump_save_areas); | ||
554 | |||
555 | static void __init smp_get_save_area(int cpu, u16 address) | ||
355 | { | 556 | { |
356 | if (ipl_info.type != IPL_TYPE_FCP_DUMP && !OLDMEM_BASE) | 557 | void *lc = pcpu_devices[0].lowcore; |
357 | return; | 558 | struct save_area *save_area; |
559 | |||
358 | if (is_kdump_kernel()) | 560 | if (is_kdump_kernel()) |
359 | return; | 561 | return; |
562 | if (!OLDMEM_BASE && (address == boot_cpu_address || | ||
563 | ipl_info.type != IPL_TYPE_FCP_DUMP)) | ||
564 | return; | ||
360 | if (cpu >= NR_CPUS) { | 565 | if (cpu >= NR_CPUS) { |
361 | pr_warning("CPU %i exceeds the maximum %i and is excluded from " | 566 | pr_warning("CPU %i exceeds the maximum %i and is excluded " |
362 | "the dump\n", cpu, NR_CPUS - 1); | 567 | "from the dump\n", cpu, NR_CPUS - 1); |
363 | return; | 568 | return; |
364 | } | 569 | } |
365 | zfcpdump_save_areas[cpu] = kmalloc(sizeof(struct save_area), GFP_KERNEL); | 570 | save_area = kmalloc(sizeof(struct save_area), GFP_KERNEL); |
366 | while (raw_sigp(phy_cpu, sigp_stop_and_store_status) == sigp_busy) | 571 | if (!save_area) |
367 | cpu_relax(); | 572 | panic("could not allocate memory for save area\n"); |
368 | memcpy_real(zfcpdump_save_areas[cpu], | 573 | zfcpdump_save_areas[cpu] = save_area; |
369 | (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE, | 574 | #ifdef CONFIG_CRASH_DUMP |
370 | sizeof(struct save_area)); | 575 | if (address == boot_cpu_address) { |
576 | /* Copy the registers of the boot cpu. */ | ||
577 | copy_oldmem_page(1, (void *) save_area, sizeof(*save_area), | ||
578 | SAVE_AREA_BASE - PAGE_SIZE, 0); | ||
579 | return; | ||
580 | } | ||
581 | #endif | ||
582 | /* Get the registers of a non-boot cpu. */ | ||
583 | __pcpu_sigp_relax(address, sigp_stop_and_store_status, 0, NULL); | ||
584 | memcpy_real(save_area, lc + SAVE_AREA_BASE, sizeof(*save_area)); | ||
371 | } | 585 | } |
372 | 586 | ||
373 | struct save_area *zfcpdump_save_areas[NR_CPUS + 1]; | 587 | int smp_store_status(int cpu) |
374 | EXPORT_SYMBOL_GPL(zfcpdump_save_areas); | ||
375 | |||
376 | #else | ||
377 | |||
378 | static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { } | ||
379 | |||
380 | #endif /* CONFIG_ZFCPDUMP */ | ||
381 | |||
382 | static int cpu_known(int cpu_id) | ||
383 | { | 588 | { |
384 | int cpu; | 589 | struct pcpu *pcpu; |
385 | 590 | ||
386 | for_each_present_cpu(cpu) { | 591 | pcpu = pcpu_devices + cpu; |
387 | if (__cpu_logical_map[cpu] == cpu_id) | 592 | if (__pcpu_sigp_relax(pcpu->address, sigp_stop_and_store_status, |
388 | return 1; | 593 | 0, NULL) != sigp_order_code_accepted) |
389 | } | 594 | return -EIO; |
390 | return 0; | 595 | return 0; |
391 | } | 596 | } |
392 | 597 | ||
393 | static int smp_rescan_cpus_sigp(cpumask_t avail) | 598 | #else /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */ |
394 | { | ||
395 | int cpu_id, logical_cpu; | ||
396 | 599 | ||
397 | logical_cpu = cpumask_first(&avail); | 600 | static inline void smp_get_save_area(int cpu, u16 address) { } |
398 | if (logical_cpu >= nr_cpu_ids) | 601 | |
399 | return 0; | 602 | #endif /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */ |
400 | for (cpu_id = 0; cpu_id <= MAX_CPU_ADDRESS; cpu_id++) { | ||
401 | if (cpu_known(cpu_id)) | ||
402 | continue; | ||
403 | __cpu_logical_map[logical_cpu] = cpu_id; | ||
404 | cpu_set_polarization(logical_cpu, POLARIZATION_UNKNOWN); | ||
405 | if (!cpu_stopped(logical_cpu)) | ||
406 | continue; | ||
407 | set_cpu_present(logical_cpu, true); | ||
408 | smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED; | ||
409 | logical_cpu = cpumask_next(logical_cpu, &avail); | ||
410 | if (logical_cpu >= nr_cpu_ids) | ||
411 | break; | ||
412 | } | ||
413 | return 0; | ||
414 | } | ||
415 | 603 | ||
416 | static int smp_rescan_cpus_sclp(cpumask_t avail) | 604 | static struct sclp_cpu_info *smp_get_cpu_info(void) |
417 | { | 605 | { |
606 | static int use_sigp_detection; | ||
418 | struct sclp_cpu_info *info; | 607 | struct sclp_cpu_info *info; |
419 | int cpu_id, logical_cpu, cpu; | 608 | int address; |
420 | int rc; | 609 | |
421 | 610 | info = kzalloc(sizeof(*info), GFP_KERNEL); | |
422 | logical_cpu = cpumask_first(&avail); | 611 | if (info && (use_sigp_detection || sclp_get_cpu_info(info))) { |
423 | if (logical_cpu >= nr_cpu_ids) | 612 | use_sigp_detection = 1; |
424 | return 0; | 613 | for (address = 0; address <= MAX_CPU_ADDRESS; address++) { |
425 | info = kmalloc(sizeof(*info), GFP_KERNEL); | 614 | if (__pcpu_sigp_relax(address, sigp_sense, 0, NULL) == |
426 | if (!info) | 615 | sigp_not_operational) |
427 | return -ENOMEM; | 616 | continue; |
428 | rc = sclp_get_cpu_info(info); | 617 | info->cpu[info->configured].address = address; |
429 | if (rc) | 618 | info->configured++; |
430 | goto out; | 619 | } |
431 | for (cpu = 0; cpu < info->combined; cpu++) { | 620 | info->combined = info->configured; |
432 | if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type) | ||
433 | continue; | ||
434 | cpu_id = info->cpu[cpu].address; | ||
435 | if (cpu_known(cpu_id)) | ||
436 | continue; | ||
437 | __cpu_logical_map[logical_cpu] = cpu_id; | ||
438 | cpu_set_polarization(logical_cpu, POLARIZATION_UNKNOWN); | ||
439 | set_cpu_present(logical_cpu, true); | ||
440 | if (cpu >= info->configured) | ||
441 | smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY; | ||
442 | else | ||
443 | smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED; | ||
444 | logical_cpu = cpumask_next(logical_cpu, &avail); | ||
445 | if (logical_cpu >= nr_cpu_ids) | ||
446 | break; | ||
447 | } | 621 | } |
448 | out: | 622 | return info; |
449 | kfree(info); | ||
450 | return rc; | ||
451 | } | 623 | } |
452 | 624 | ||
453 | static int __smp_rescan_cpus(void) | 625 | static int __devinit smp_add_present_cpu(int cpu); |
626 | |||
627 | static int __devinit __smp_rescan_cpus(struct sclp_cpu_info *info, | ||
628 | int sysfs_add) | ||
454 | { | 629 | { |
630 | struct pcpu *pcpu; | ||
455 | cpumask_t avail; | 631 | cpumask_t avail; |
632 | int cpu, nr, i; | ||
456 | 633 | ||
634 | nr = 0; | ||
457 | cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask); | 635 | cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask); |
458 | if (smp_use_sigp_detection) | 636 | cpu = cpumask_first(&avail); |
459 | return smp_rescan_cpus_sigp(avail); | 637 | for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) { |
460 | else | 638 | if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type) |
461 | return smp_rescan_cpus_sclp(avail); | 639 | continue; |
640 | if (pcpu_find_address(cpu_present_mask, info->cpu[i].address)) | ||
641 | continue; | ||
642 | pcpu = pcpu_devices + cpu; | ||
643 | pcpu->address = info->cpu[i].address; | ||
644 | pcpu->state = (cpu >= info->configured) ? | ||
645 | CPU_STATE_STANDBY : CPU_STATE_CONFIGURED; | ||
646 | cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); | ||
647 | set_cpu_present(cpu, true); | ||
648 | if (sysfs_add && smp_add_present_cpu(cpu) != 0) | ||
649 | set_cpu_present(cpu, false); | ||
650 | else | ||
651 | nr++; | ||
652 | cpu = cpumask_next(cpu, &avail); | ||
653 | } | ||
654 | return nr; | ||
462 | } | 655 | } |
463 | 656 | ||
464 | static void __init smp_detect_cpus(void) | 657 | static void __init smp_detect_cpus(void) |
465 | { | 658 | { |
466 | unsigned int cpu, c_cpus, s_cpus; | 659 | unsigned int cpu, c_cpus, s_cpus; |
467 | struct sclp_cpu_info *info; | 660 | struct sclp_cpu_info *info; |
468 | u16 boot_cpu_addr, cpu_addr; | ||
469 | 661 | ||
470 | c_cpus = 1; | 662 | info = smp_get_cpu_info(); |
471 | s_cpus = 0; | ||
472 | boot_cpu_addr = __cpu_logical_map[0]; | ||
473 | info = kmalloc(sizeof(*info), GFP_KERNEL); | ||
474 | if (!info) | 663 | if (!info) |
475 | panic("smp_detect_cpus failed to allocate memory\n"); | 664 | panic("smp_detect_cpus failed to allocate memory\n"); |
476 | #ifdef CONFIG_CRASH_DUMP | ||
477 | if (OLDMEM_BASE && !is_kdump_kernel()) { | ||
478 | struct save_area *save_area; | ||
479 | |||
480 | save_area = kmalloc(sizeof(*save_area), GFP_KERNEL); | ||
481 | if (!save_area) | ||
482 | panic("could not allocate memory for save area\n"); | ||
483 | copy_oldmem_page(1, (void *) save_area, sizeof(*save_area), | ||
484 | 0x200, 0); | ||
485 | zfcpdump_save_areas[0] = save_area; | ||
486 | } | ||
487 | #endif | ||
488 | /* Use sigp detection algorithm if sclp doesn't work. */ | ||
489 | if (sclp_get_cpu_info(info)) { | ||
490 | smp_use_sigp_detection = 1; | ||
491 | for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) { | ||
492 | if (cpu == boot_cpu_addr) | ||
493 | continue; | ||
494 | if (!raw_cpu_stopped(cpu)) | ||
495 | continue; | ||
496 | smp_get_save_area(c_cpus, cpu); | ||
497 | c_cpus++; | ||
498 | } | ||
499 | goto out; | ||
500 | } | ||
501 | |||
502 | if (info->has_cpu_type) { | 665 | if (info->has_cpu_type) { |
503 | for (cpu = 0; cpu < info->combined; cpu++) { | 666 | for (cpu = 0; cpu < info->combined; cpu++) { |
504 | if (info->cpu[cpu].address == boot_cpu_addr) { | 667 | if (info->cpu[cpu].address != boot_cpu_address) |
505 | smp_cpu_type = info->cpu[cpu].type; | 668 | continue; |
506 | break; | 669 | /* The boot cpu dictates the cpu type. */ |
507 | } | 670 | boot_cpu_type = info->cpu[cpu].type; |
671 | break; | ||
508 | } | 672 | } |
509 | } | 673 | } |
510 | 674 | c_cpus = s_cpus = 0; | |
511 | for (cpu = 0; cpu < info->combined; cpu++) { | 675 | for (cpu = 0; cpu < info->combined; cpu++) { |
512 | if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type) | 676 | if (info->has_cpu_type && info->cpu[cpu].type != boot_cpu_type) |
513 | continue; | 677 | continue; |
514 | cpu_addr = info->cpu[cpu].address; | 678 | if (cpu < info->configured) { |
515 | if (cpu_addr == boot_cpu_addr) | 679 | smp_get_save_area(c_cpus, info->cpu[cpu].address); |
516 | continue; | 680 | c_cpus++; |
517 | if (!raw_cpu_stopped(cpu_addr)) { | 681 | } else |
518 | s_cpus++; | 682 | s_cpus++; |
519 | continue; | ||
520 | } | ||
521 | smp_get_save_area(c_cpus, cpu_addr); | ||
522 | c_cpus++; | ||
523 | } | 683 | } |
524 | out: | ||
525 | kfree(info); | ||
526 | pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus); | 684 | pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus); |
527 | get_online_cpus(); | 685 | get_online_cpus(); |
528 | __smp_rescan_cpus(); | 686 | __smp_rescan_cpus(info, 0); |
529 | put_online_cpus(); | 687 | put_online_cpus(); |
688 | kfree(info); | ||
530 | } | 689 | } |
531 | 690 | ||
532 | /* | 691 | /* |
533 | * Activate a secondary processor. | 692 | * Activate a secondary processor. |
534 | */ | 693 | */ |
535 | int __cpuinit start_secondary(void *cpuvoid) | 694 | static void __cpuinit smp_start_secondary(void *cpuvoid) |
536 | { | 695 | { |
696 | S390_lowcore.last_update_clock = get_clock(); | ||
697 | S390_lowcore.restart_stack = (unsigned long) restart_stack; | ||
698 | S390_lowcore.restart_fn = (unsigned long) do_restart; | ||
699 | S390_lowcore.restart_data = 0; | ||
700 | S390_lowcore.restart_source = -1UL; | ||
701 | restore_access_regs(S390_lowcore.access_regs_save_area); | ||
702 | __ctl_load(S390_lowcore.cregs_save_area, 0, 15); | ||
703 | __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT); | ||
537 | cpu_init(); | 704 | cpu_init(); |
538 | preempt_disable(); | 705 | preempt_disable(); |
539 | init_cpu_timer(); | 706 | init_cpu_timer(); |
540 | init_cpu_vtimer(); | 707 | init_cpu_vtimer(); |
541 | pfault_init(); | 708 | pfault_init(); |
542 | |||
543 | notify_cpu_starting(smp_processor_id()); | 709 | notify_cpu_starting(smp_processor_id()); |
544 | ipi_call_lock(); | 710 | ipi_call_lock(); |
545 | set_cpu_online(smp_processor_id(), true); | 711 | set_cpu_online(smp_processor_id(), true); |
546 | ipi_call_unlock(); | 712 | ipi_call_unlock(); |
547 | __ctl_clear_bit(0, 28); /* Disable lowcore protection */ | ||
548 | S390_lowcore.restart_psw.mask = | ||
549 | PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA; | ||
550 | S390_lowcore.restart_psw.addr = | ||
551 | PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler; | ||
552 | __ctl_set_bit(0, 28); /* Enable lowcore protection */ | ||
553 | /* | 713 | /* |
554 | * Wait until the cpu which brought this one up marked it | 714 | * Wait until the cpu which brought this one up marked it |
555 | * active before enabling interrupts. | 715 | * active before enabling interrupts. |
@@ -559,7 +719,6 @@ int __cpuinit start_secondary(void *cpuvoid) | |||
559 | local_irq_enable(); | 719 | local_irq_enable(); |
560 | /* cpu_idle will call schedule for us */ | 720 | /* cpu_idle will call schedule for us */ |
561 | cpu_idle(); | 721 | cpu_idle(); |
562 | return 0; | ||
563 | } | 722 | } |
564 | 723 | ||
565 | struct create_idle { | 724 | struct create_idle { |
@@ -578,82 +737,20 @@ static void __cpuinit smp_fork_idle(struct work_struct *work) | |||
578 | complete(&c_idle->done); | 737 | complete(&c_idle->done); |
579 | } | 738 | } |
580 | 739 | ||
581 | static int __cpuinit smp_alloc_lowcore(int cpu) | ||
582 | { | ||
583 | unsigned long async_stack, panic_stack; | ||
584 | struct _lowcore *lowcore; | ||
585 | |||
586 | lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER); | ||
587 | if (!lowcore) | ||
588 | return -ENOMEM; | ||
589 | async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER); | ||
590 | panic_stack = __get_free_page(GFP_KERNEL); | ||
591 | if (!panic_stack || !async_stack) | ||
592 | goto out; | ||
593 | memcpy(lowcore, &S390_lowcore, 512); | ||
594 | memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512); | ||
595 | lowcore->async_stack = async_stack + ASYNC_SIZE; | ||
596 | lowcore->panic_stack = panic_stack + PAGE_SIZE; | ||
597 | lowcore->restart_psw.mask = | ||
598 | PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA; | ||
599 | lowcore->restart_psw.addr = | ||
600 | PSW_ADDR_AMODE | (unsigned long) restart_int_handler; | ||
601 | if (user_mode != HOME_SPACE_MODE) | ||
602 | lowcore->restart_psw.mask |= PSW_ASC_HOME; | ||
603 | #ifndef CONFIG_64BIT | ||
604 | if (MACHINE_HAS_IEEE) { | ||
605 | unsigned long save_area; | ||
606 | |||
607 | save_area = get_zeroed_page(GFP_KERNEL); | ||
608 | if (!save_area) | ||
609 | goto out; | ||
610 | lowcore->extended_save_area_addr = (u32) save_area; | ||
611 | } | ||
612 | #else | ||
613 | if (vdso_alloc_per_cpu(cpu, lowcore)) | ||
614 | goto out; | ||
615 | #endif | ||
616 | lowcore_ptr[cpu] = lowcore; | ||
617 | return 0; | ||
618 | |||
619 | out: | ||
620 | free_page(panic_stack); | ||
621 | free_pages(async_stack, ASYNC_ORDER); | ||
622 | free_pages((unsigned long) lowcore, LC_ORDER); | ||
623 | return -ENOMEM; | ||
624 | } | ||
625 | |||
626 | static void smp_free_lowcore(int cpu) | ||
627 | { | ||
628 | struct _lowcore *lowcore; | ||
629 | |||
630 | lowcore = lowcore_ptr[cpu]; | ||
631 | #ifndef CONFIG_64BIT | ||
632 | if (MACHINE_HAS_IEEE) | ||
633 | free_page((unsigned long) lowcore->extended_save_area_addr); | ||
634 | #else | ||
635 | vdso_free_per_cpu(cpu, lowcore); | ||
636 | #endif | ||
637 | free_page(lowcore->panic_stack - PAGE_SIZE); | ||
638 | free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER); | ||
639 | free_pages((unsigned long) lowcore, LC_ORDER); | ||
640 | lowcore_ptr[cpu] = NULL; | ||
641 | } | ||
642 | |||
643 | /* Upping and downing of CPUs */ | 740 | /* Upping and downing of CPUs */ |
644 | int __cpuinit __cpu_up(unsigned int cpu) | 741 | int __cpuinit __cpu_up(unsigned int cpu) |
645 | { | 742 | { |
646 | struct _lowcore *cpu_lowcore; | ||
647 | struct create_idle c_idle; | 743 | struct create_idle c_idle; |
648 | struct task_struct *idle; | 744 | struct pcpu *pcpu; |
649 | struct stack_frame *sf; | 745 | int rc; |
650 | u32 lowcore; | ||
651 | int ccode; | ||
652 | 746 | ||
653 | if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED) | 747 | pcpu = pcpu_devices + cpu; |
748 | if (pcpu->state != CPU_STATE_CONFIGURED) | ||
654 | return -EIO; | 749 | return -EIO; |
655 | idle = current_set[cpu]; | 750 | if (pcpu_sigp_retry(pcpu, sigp_initial_cpu_reset, 0) != |
656 | if (!idle) { | 751 | sigp_order_code_accepted) |
752 | return -EIO; | ||
753 | if (!pcpu->idle) { | ||
657 | c_idle.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done); | 754 | c_idle.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done); |
658 | INIT_WORK_ONSTACK(&c_idle.work, smp_fork_idle); | 755 | INIT_WORK_ONSTACK(&c_idle.work, smp_fork_idle); |
659 | c_idle.cpu = cpu; | 756 | c_idle.cpu = cpu; |
@@ -661,68 +758,28 @@ int __cpuinit __cpu_up(unsigned int cpu) | |||
661 | wait_for_completion(&c_idle.done); | 758 | wait_for_completion(&c_idle.done); |
662 | if (IS_ERR(c_idle.idle)) | 759 | if (IS_ERR(c_idle.idle)) |
663 | return PTR_ERR(c_idle.idle); | 760 | return PTR_ERR(c_idle.idle); |
664 | idle = c_idle.idle; | 761 | pcpu->idle = c_idle.idle; |
665 | current_set[cpu] = c_idle.idle; | ||
666 | } | 762 | } |
667 | init_idle(idle, cpu); | 763 | init_idle(pcpu->idle, cpu); |
668 | if (smp_alloc_lowcore(cpu)) | 764 | rc = pcpu_alloc_lowcore(pcpu, cpu); |
669 | return -ENOMEM; | 765 | if (rc) |
670 | do { | 766 | return rc; |
671 | ccode = sigp(cpu, sigp_initial_cpu_reset); | 767 | pcpu_prepare_secondary(pcpu, cpu); |
672 | if (ccode == sigp_busy) | 768 | pcpu_attach_task(pcpu, pcpu->idle); |
673 | udelay(10); | 769 | pcpu_start_fn(pcpu, smp_start_secondary, NULL); |
674 | if (ccode == sigp_not_operational) | ||
675 | goto err_out; | ||
676 | } while (ccode == sigp_busy); | ||
677 | |||
678 | lowcore = (u32)(unsigned long)lowcore_ptr[cpu]; | ||
679 | while (sigp_p(lowcore, cpu, sigp_set_prefix) == sigp_busy) | ||
680 | udelay(10); | ||
681 | |||
682 | cpu_lowcore = lowcore_ptr[cpu]; | ||
683 | cpu_lowcore->kernel_stack = (unsigned long) | ||
684 | task_stack_page(idle) + THREAD_SIZE; | ||
685 | cpu_lowcore->thread_info = (unsigned long) task_thread_info(idle); | ||
686 | sf = (struct stack_frame *) (cpu_lowcore->kernel_stack | ||
687 | - sizeof(struct pt_regs) | ||
688 | - sizeof(struct stack_frame)); | ||
689 | memset(sf, 0, sizeof(struct stack_frame)); | ||
690 | sf->gprs[9] = (unsigned long) sf; | ||
691 | cpu_lowcore->gpregs_save_area[15] = (unsigned long) sf; | ||
692 | __ctl_store(cpu_lowcore->cregs_save_area, 0, 15); | ||
693 | atomic_inc(&init_mm.context.attach_count); | ||
694 | asm volatile( | ||
695 | " stam 0,15,0(%0)" | ||
696 | : : "a" (&cpu_lowcore->access_regs_save_area) : "memory"); | ||
697 | cpu_lowcore->percpu_offset = __per_cpu_offset[cpu]; | ||
698 | cpu_lowcore->current_task = (unsigned long) idle; | ||
699 | cpu_lowcore->cpu_nr = cpu; | ||
700 | cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce; | ||
701 | cpu_lowcore->machine_flags = S390_lowcore.machine_flags; | ||
702 | cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func; | ||
703 | memcpy(cpu_lowcore->stfle_fac_list, S390_lowcore.stfle_fac_list, | ||
704 | MAX_FACILITY_BIT/8); | ||
705 | eieio(); | ||
706 | |||
707 | while (sigp(cpu, sigp_restart) == sigp_busy) | ||
708 | udelay(10); | ||
709 | |||
710 | while (!cpu_online(cpu)) | 770 | while (!cpu_online(cpu)) |
711 | cpu_relax(); | 771 | cpu_relax(); |
712 | return 0; | 772 | return 0; |
713 | |||
714 | err_out: | ||
715 | smp_free_lowcore(cpu); | ||
716 | return -EIO; | ||
717 | } | 773 | } |
718 | 774 | ||
719 | static int __init setup_possible_cpus(char *s) | 775 | static int __init setup_possible_cpus(char *s) |
720 | { | 776 | { |
721 | int pcpus, cpu; | 777 | int max, cpu; |
722 | 778 | ||
723 | pcpus = simple_strtoul(s, NULL, 0); | 779 | if (kstrtoint(s, 0, &max) < 0) |
780 | return 0; | ||
724 | init_cpu_possible(cpumask_of(0)); | 781 | init_cpu_possible(cpumask_of(0)); |
725 | for (cpu = 1; cpu < pcpus && cpu < nr_cpu_ids; cpu++) | 782 | for (cpu = 1; cpu < max && cpu < nr_cpu_ids; cpu++) |
726 | set_cpu_possible(cpu, true); | 783 | set_cpu_possible(cpu, true); |
727 | return 0; | 784 | return 0; |
728 | } | 785 | } |
@@ -732,113 +789,67 @@ early_param("possible_cpus", setup_possible_cpus); | |||
732 | 789 | ||
733 | int __cpu_disable(void) | 790 | int __cpu_disable(void) |
734 | { | 791 | { |
735 | struct ec_creg_mask_parms cr_parms; | 792 | unsigned long cregs[16]; |
736 | int cpu = smp_processor_id(); | ||
737 | |||
738 | set_cpu_online(cpu, false); | ||
739 | 793 | ||
740 | /* Disable pfault pseudo page faults on this cpu. */ | 794 | set_cpu_online(smp_processor_id(), false); |
795 | /* Disable pseudo page faults on this cpu. */ | ||
741 | pfault_fini(); | 796 | pfault_fini(); |
742 | 797 | /* Disable interrupt sources via control register. */ | |
743 | memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals)); | 798 | __ctl_store(cregs, 0, 15); |
744 | memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals)); | 799 | cregs[0] &= ~0x0000ee70UL; /* disable all external interrupts */ |
745 | 800 | cregs[6] &= ~0xff000000UL; /* disable all I/O interrupts */ | |
746 | /* disable all external interrupts */ | 801 | cregs[14] &= ~0x1f000000UL; /* disable most machine checks */ |
747 | cr_parms.orvals[0] = 0; | 802 | __ctl_load(cregs, 0, 15); |
748 | cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 11 | | ||
749 | 1 << 10 | 1 << 9 | 1 << 6 | 1 << 5 | | ||
750 | 1 << 4); | ||
751 | /* disable all I/O interrupts */ | ||
752 | cr_parms.orvals[6] = 0; | ||
753 | cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 | | ||
754 | 1 << 27 | 1 << 26 | 1 << 25 | 1 << 24); | ||
755 | /* disable most machine checks */ | ||
756 | cr_parms.orvals[14] = 0; | ||
757 | cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 | | ||
758 | 1 << 25 | 1 << 24); | ||
759 | |||
760 | smp_ctl_bit_callback(&cr_parms); | ||
761 | |||
762 | return 0; | 803 | return 0; |
763 | } | 804 | } |
764 | 805 | ||
765 | void __cpu_die(unsigned int cpu) | 806 | void __cpu_die(unsigned int cpu) |
766 | { | 807 | { |
808 | struct pcpu *pcpu; | ||
809 | |||
767 | /* Wait until target cpu is down */ | 810 | /* Wait until target cpu is down */ |
768 | while (!cpu_stopped(cpu)) | 811 | pcpu = pcpu_devices + cpu; |
812 | while (!pcpu_stopped(pcpu)) | ||
769 | cpu_relax(); | 813 | cpu_relax(); |
770 | while (sigp_p(0, cpu, sigp_set_prefix) == sigp_busy) | 814 | pcpu_free_lowcore(pcpu); |
771 | udelay(10); | ||
772 | smp_free_lowcore(cpu); | ||
773 | atomic_dec(&init_mm.context.attach_count); | 815 | atomic_dec(&init_mm.context.attach_count); |
774 | } | 816 | } |
775 | 817 | ||
776 | void __noreturn cpu_die(void) | 818 | void __noreturn cpu_die(void) |
777 | { | 819 | { |
778 | idle_task_exit(); | 820 | idle_task_exit(); |
779 | while (sigp(smp_processor_id(), sigp_stop) == sigp_busy) | 821 | pcpu_sigp_retry(pcpu_devices + smp_processor_id(), sigp_stop, 0); |
780 | cpu_relax(); | 822 | for (;;) ; |
781 | for (;;); | ||
782 | } | 823 | } |
783 | 824 | ||
784 | #endif /* CONFIG_HOTPLUG_CPU */ | 825 | #endif /* CONFIG_HOTPLUG_CPU */ |
785 | 826 | ||
786 | void __init smp_prepare_cpus(unsigned int max_cpus) | 827 | void __init smp_prepare_cpus(unsigned int max_cpus) |
787 | { | 828 | { |
788 | #ifndef CONFIG_64BIT | ||
789 | unsigned long save_area = 0; | ||
790 | #endif | ||
791 | unsigned long async_stack, panic_stack; | ||
792 | struct _lowcore *lowcore; | ||
793 | |||
794 | smp_detect_cpus(); | ||
795 | |||
796 | /* request the 0x1201 emergency signal external interrupt */ | 829 | /* request the 0x1201 emergency signal external interrupt */ |
797 | if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) | 830 | if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) |
798 | panic("Couldn't request external interrupt 0x1201"); | 831 | panic("Couldn't request external interrupt 0x1201"); |
799 | /* request the 0x1202 external call external interrupt */ | 832 | /* request the 0x1202 external call external interrupt */ |
800 | if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0) | 833 | if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0) |
801 | panic("Couldn't request external interrupt 0x1202"); | 834 | panic("Couldn't request external interrupt 0x1202"); |
802 | 835 | smp_detect_cpus(); | |
803 | /* Reallocate current lowcore, but keep its contents. */ | ||
804 | lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER); | ||
805 | panic_stack = __get_free_page(GFP_KERNEL); | ||
806 | async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER); | ||
807 | BUG_ON(!lowcore || !panic_stack || !async_stack); | ||
808 | #ifndef CONFIG_64BIT | ||
809 | if (MACHINE_HAS_IEEE) | ||
810 | save_area = get_zeroed_page(GFP_KERNEL); | ||
811 | #endif | ||
812 | local_irq_disable(); | ||
813 | local_mcck_disable(); | ||
814 | lowcore_ptr[smp_processor_id()] = lowcore; | ||
815 | *lowcore = S390_lowcore; | ||
816 | lowcore->panic_stack = panic_stack + PAGE_SIZE; | ||
817 | lowcore->async_stack = async_stack + ASYNC_SIZE; | ||
818 | #ifndef CONFIG_64BIT | ||
819 | if (MACHINE_HAS_IEEE) | ||
820 | lowcore->extended_save_area_addr = (u32) save_area; | ||
821 | #endif | ||
822 | set_prefix((u32)(unsigned long) lowcore); | ||
823 | local_mcck_enable(); | ||
824 | local_irq_enable(); | ||
825 | #ifdef CONFIG_64BIT | ||
826 | if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore)) | ||
827 | BUG(); | ||
828 | #endif | ||
829 | } | 836 | } |
830 | 837 | ||
831 | void __init smp_prepare_boot_cpu(void) | 838 | void __init smp_prepare_boot_cpu(void) |
832 | { | 839 | { |
833 | BUG_ON(smp_processor_id() != 0); | 840 | struct pcpu *pcpu = pcpu_devices; |
834 | 841 | ||
835 | current_thread_info()->cpu = 0; | 842 | boot_cpu_address = stap(); |
836 | set_cpu_present(0, true); | 843 | pcpu->idle = current; |
837 | set_cpu_online(0, true); | 844 | pcpu->state = CPU_STATE_CONFIGURED; |
845 | pcpu->address = boot_cpu_address; | ||
846 | pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix(); | ||
847 | pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE; | ||
848 | pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE; | ||
838 | S390_lowcore.percpu_offset = __per_cpu_offset[0]; | 849 | S390_lowcore.percpu_offset = __per_cpu_offset[0]; |
839 | current_set[0] = current; | ||
840 | smp_cpu_state[0] = CPU_STATE_CONFIGURED; | ||
841 | cpu_set_polarization(0, POLARIZATION_UNKNOWN); | 850 | cpu_set_polarization(0, POLARIZATION_UNKNOWN); |
851 | set_cpu_present(0, true); | ||
852 | set_cpu_online(0, true); | ||
842 | } | 853 | } |
843 | 854 | ||
844 | void __init smp_cpus_done(unsigned int max_cpus) | 855 | void __init smp_cpus_done(unsigned int max_cpus) |
@@ -848,7 +859,6 @@ void __init smp_cpus_done(unsigned int max_cpus) | |||
848 | void __init smp_setup_processor_id(void) | 859 | void __init smp_setup_processor_id(void) |
849 | { | 860 | { |
850 | S390_lowcore.cpu_nr = 0; | 861 | S390_lowcore.cpu_nr = 0; |
851 | __cpu_logical_map[0] = stap(); | ||
852 | } | 862 | } |
853 | 863 | ||
854 | /* | 864 | /* |
@@ -864,56 +874,57 @@ int setup_profiling_timer(unsigned int multiplier) | |||
864 | 874 | ||
865 | #ifdef CONFIG_HOTPLUG_CPU | 875 | #ifdef CONFIG_HOTPLUG_CPU |
866 | static ssize_t cpu_configure_show(struct device *dev, | 876 | static ssize_t cpu_configure_show(struct device *dev, |
867 | struct device_attribute *attr, char *buf) | 877 | struct device_attribute *attr, char *buf) |
868 | { | 878 | { |
869 | ssize_t count; | 879 | ssize_t count; |
870 | 880 | ||
871 | mutex_lock(&smp_cpu_state_mutex); | 881 | mutex_lock(&smp_cpu_state_mutex); |
872 | count = sprintf(buf, "%d\n", smp_cpu_state[dev->id]); | 882 | count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state); |
873 | mutex_unlock(&smp_cpu_state_mutex); | 883 | mutex_unlock(&smp_cpu_state_mutex); |
874 | return count; | 884 | return count; |
875 | } | 885 | } |
876 | 886 | ||
877 | static ssize_t cpu_configure_store(struct device *dev, | 887 | static ssize_t cpu_configure_store(struct device *dev, |
878 | struct device_attribute *attr, | 888 | struct device_attribute *attr, |
879 | const char *buf, size_t count) | 889 | const char *buf, size_t count) |
880 | { | 890 | { |
881 | int cpu = dev->id; | 891 | struct pcpu *pcpu; |
882 | int val, rc; | 892 | int cpu, val, rc; |
883 | char delim; | 893 | char delim; |
884 | 894 | ||
885 | if (sscanf(buf, "%d %c", &val, &delim) != 1) | 895 | if (sscanf(buf, "%d %c", &val, &delim) != 1) |
886 | return -EINVAL; | 896 | return -EINVAL; |
887 | if (val != 0 && val != 1) | 897 | if (val != 0 && val != 1) |
888 | return -EINVAL; | 898 | return -EINVAL; |
889 | |||
890 | get_online_cpus(); | 899 | get_online_cpus(); |
891 | mutex_lock(&smp_cpu_state_mutex); | 900 | mutex_lock(&smp_cpu_state_mutex); |
892 | rc = -EBUSY; | 901 | rc = -EBUSY; |
893 | /* disallow configuration changes of online cpus and cpu 0 */ | 902 | /* disallow configuration changes of online cpus and cpu 0 */ |
903 | cpu = dev->id; | ||
894 | if (cpu_online(cpu) || cpu == 0) | 904 | if (cpu_online(cpu) || cpu == 0) |
895 | goto out; | 905 | goto out; |
906 | pcpu = pcpu_devices + cpu; | ||
896 | rc = 0; | 907 | rc = 0; |
897 | switch (val) { | 908 | switch (val) { |
898 | case 0: | 909 | case 0: |
899 | if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) { | 910 | if (pcpu->state != CPU_STATE_CONFIGURED) |
900 | rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]); | 911 | break; |
901 | if (!rc) { | 912 | rc = sclp_cpu_deconfigure(pcpu->address); |
902 | smp_cpu_state[cpu] = CPU_STATE_STANDBY; | 913 | if (rc) |
903 | cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); | 914 | break; |
904 | topology_expect_change(); | 915 | pcpu->state = CPU_STATE_STANDBY; |
905 | } | 916 | cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); |
906 | } | 917 | topology_expect_change(); |
907 | break; | 918 | break; |
908 | case 1: | 919 | case 1: |
909 | if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) { | 920 | if (pcpu->state != CPU_STATE_STANDBY) |
910 | rc = sclp_cpu_configure(__cpu_logical_map[cpu]); | 921 | break; |
911 | if (!rc) { | 922 | rc = sclp_cpu_configure(pcpu->address); |
912 | smp_cpu_state[cpu] = CPU_STATE_CONFIGURED; | 923 | if (rc) |
913 | cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); | 924 | break; |
914 | topology_expect_change(); | 925 | pcpu->state = CPU_STATE_CONFIGURED; |
915 | } | 926 | cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); |
916 | } | 927 | topology_expect_change(); |
917 | break; | 928 | break; |
918 | default: | 929 | default: |
919 | break; | 930 | break; |
@@ -929,7 +940,7 @@ static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store); | |||
929 | static ssize_t show_cpu_address(struct device *dev, | 940 | static ssize_t show_cpu_address(struct device *dev, |
930 | struct device_attribute *attr, char *buf) | 941 | struct device_attribute *attr, char *buf) |
931 | { | 942 | { |
932 | return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]); | 943 | return sprintf(buf, "%d\n", pcpu_devices[dev->id].address); |
933 | } | 944 | } |
934 | static DEVICE_ATTR(address, 0444, show_cpu_address, NULL); | 945 | static DEVICE_ATTR(address, 0444, show_cpu_address, NULL); |
935 | 946 | ||
@@ -1021,7 +1032,7 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self, | |||
1021 | unsigned long action, void *hcpu) | 1032 | unsigned long action, void *hcpu) |
1022 | { | 1033 | { |
1023 | unsigned int cpu = (unsigned int)(long)hcpu; | 1034 | unsigned int cpu = (unsigned int)(long)hcpu; |
1024 | struct cpu *c = &per_cpu(cpu_devices, cpu); | 1035 | struct cpu *c = &pcpu_devices[cpu].cpu; |
1025 | struct device *s = &c->dev; | 1036 | struct device *s = &c->dev; |
1026 | struct s390_idle_data *idle; | 1037 | struct s390_idle_data *idle; |
1027 | int err = 0; | 1038 | int err = 0; |
@@ -1047,7 +1058,7 @@ static struct notifier_block __cpuinitdata smp_cpu_nb = { | |||
1047 | 1058 | ||
1048 | static int __devinit smp_add_present_cpu(int cpu) | 1059 | static int __devinit smp_add_present_cpu(int cpu) |
1049 | { | 1060 | { |
1050 | struct cpu *c = &per_cpu(cpu_devices, cpu); | 1061 | struct cpu *c = &pcpu_devices[cpu].cpu; |
1051 | struct device *s = &c->dev; | 1062 | struct device *s = &c->dev; |
1052 | int rc; | 1063 | int rc; |
1053 | 1064 | ||
@@ -1085,29 +1096,21 @@ out: | |||
1085 | 1096 | ||
1086 | int __ref smp_rescan_cpus(void) | 1097 | int __ref smp_rescan_cpus(void) |
1087 | { | 1098 | { |
1088 | cpumask_t newcpus; | 1099 | struct sclp_cpu_info *info; |
1089 | int cpu; | 1100 | int nr; |
1090 | int rc; | ||
1091 | 1101 | ||
1102 | info = smp_get_cpu_info(); | ||
1103 | if (!info) | ||
1104 | return -ENOMEM; | ||
1092 | get_online_cpus(); | 1105 | get_online_cpus(); |
1093 | mutex_lock(&smp_cpu_state_mutex); | 1106 | mutex_lock(&smp_cpu_state_mutex); |
1094 | cpumask_copy(&newcpus, cpu_present_mask); | 1107 | nr = __smp_rescan_cpus(info, 1); |
1095 | rc = __smp_rescan_cpus(); | ||
1096 | if (rc) | ||
1097 | goto out; | ||
1098 | cpumask_andnot(&newcpus, cpu_present_mask, &newcpus); | ||
1099 | for_each_cpu(cpu, &newcpus) { | ||
1100 | rc = smp_add_present_cpu(cpu); | ||
1101 | if (rc) | ||
1102 | set_cpu_present(cpu, false); | ||
1103 | } | ||
1104 | rc = 0; | ||
1105 | out: | ||
1106 | mutex_unlock(&smp_cpu_state_mutex); | 1108 | mutex_unlock(&smp_cpu_state_mutex); |
1107 | put_online_cpus(); | 1109 | put_online_cpus(); |
1108 | if (!cpumask_empty(&newcpus)) | 1110 | kfree(info); |
1111 | if (nr) | ||
1109 | topology_schedule_update(); | 1112 | topology_schedule_update(); |
1110 | return rc; | 1113 | return 0; |
1111 | } | 1114 | } |
1112 | 1115 | ||
1113 | static ssize_t __ref rescan_store(struct device *dev, | 1116 | static ssize_t __ref rescan_store(struct device *dev, |