Diffstat (limited to 'arch/s390/kernel/smp.c')
-rw-r--r--	arch/s390/kernel/smp.c	369
1 file changed, 204 insertions(+), 165 deletions(-)
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 97764f710bb7..3754e2031b39 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -1,12 +1,12 @@
 /*
  * arch/s390/kernel/smp.c
  *
- * Copyright (C) IBM Corp. 1999,2006
+ * Copyright IBM Corp. 1999,2007
  * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
  *            Martin Schwidefsky (schwidefsky@de.ibm.com)
  *            Heiko Carstens (heiko.carstens@de.ibm.com)
  *
  * based on other smp stuff by
  * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net>
  * (c) 1998 Ingo Molnar
  *
@@ -31,6 +31,7 @@
 #include <linux/interrupt.h>
 #include <linux/cpu.h>
 #include <linux/timex.h>
+#include <linux/bootmem.h>
 #include <asm/ipl.h>
 #include <asm/setup.h>
 #include <asm/sigp.h>
@@ -40,17 +41,19 @@
 #include <asm/cpcmd.h>
 #include <asm/tlbflush.h>
 #include <asm/timer.h>
-
-extern volatile int __cpu_logical_map[];
+#include <asm/lowcore.h>
 
 /*
  * An array with a pointer the lowcore of every CPU.
  */
-
 struct _lowcore *lowcore_ptr[NR_CPUS];
+EXPORT_SYMBOL(lowcore_ptr);
 
 cpumask_t cpu_online_map = CPU_MASK_NONE;
+EXPORT_SYMBOL(cpu_online_map);
+
 cpumask_t cpu_possible_map = CPU_MASK_NONE;
+EXPORT_SYMBOL(cpu_possible_map);
 
 static struct task_struct *current_set[NR_CPUS];
 
@@ -70,7 +73,7 @@ struct call_data_struct {
 	int wait;
 };
 
-static struct call_data_struct * call_data;
+static struct call_data_struct *call_data;
 
 /*
  * 'Call function' interrupt callback
@@ -150,8 +153,8 @@ out:
  *
  * Run a function on all other CPUs.
  *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler. You may call it from a bottom half.
+ * You must not call this function with disabled interrupts, from a
+ * hardware interrupt handler or from a bottom half.
  */
 int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
 		      int wait)
@@ -177,11 +180,11 @@ EXPORT_SYMBOL(smp_call_function);
  *
  * Run a function on one processor.
  *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler. You may call it from a bottom half.
+ * You must not call this function with disabled interrupts, from a
+ * hardware interrupt handler or from a bottom half.
  */
 int smp_call_function_on(void (*func) (void *info), void *info, int nonatomic,
 			 int wait, int cpu)
 {
 	cpumask_t map = CPU_MASK_NONE;
 
@@ -195,9 +198,9 @@ EXPORT_SYMBOL(smp_call_function_on);
 
 static void do_send_stop(void)
 {
-        int cpu, rc;
+	int cpu, rc;
 
-        /* stop all processors */
-        for_each_online_cpu(cpu) {
-                if (cpu == smp_processor_id())
-                        continue;
+	/* stop all processors */
+	for_each_online_cpu(cpu) {
+		if (cpu == smp_processor_id())
+			continue;
@@ -209,9 +212,9 @@ static void do_send_stop(void)
 
 static void do_store_status(void)
 {
-        int cpu, rc;
+	int cpu, rc;
 
-        /* store status of all processors in their lowcores (real 0) */
-        for_each_online_cpu(cpu) {
-                if (cpu == smp_processor_id())
-                        continue;
+	/* store status of all processors in their lowcores (real 0) */
+	for_each_online_cpu(cpu) {
+		if (cpu == smp_processor_id())
+			continue;
@@ -219,8 +222,8 @@ static void do_store_status(void)
 			rc = signal_processor_p(
 				(__u32)(unsigned long) lowcore_ptr[cpu], cpu,
 				sigp_store_status_at_address);
-		} while(rc == sigp_busy);
+		} while (rc == sigp_busy);
 	}
 }
 
 static void do_wait_for_stop(void)
@@ -231,7 +234,7 @@ static void do_wait_for_stop(void)
 	for_each_online_cpu(cpu) {
 		if (cpu == smp_processor_id())
 			continue;
-		while(!smp_cpu_not_running(cpu))
+		while (!smp_cpu_not_running(cpu))
 			cpu_relax();
 	}
 }
@@ -245,7 +248,7 @@ void smp_send_stop(void)
 	/* Disable all interrupts/machine checks */
 	__load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);
 
-        /* write magic number to zero page (absolute 0) */
+	/* write magic number to zero page (absolute 0) */
 	lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;
 
 	/* stop other processors. */
@@ -261,8 +264,7 @@ void smp_send_stop(void)
 /*
  * Reboot, halt and power_off routines for SMP.
  */
-
-void machine_restart_smp(char * __unused)
+void machine_restart_smp(char *__unused)
 {
 	smp_send_stop();
 	do_reipl();
@@ -293,17 +295,17 @@ void machine_power_off_smp(void)
 
 static void do_ext_call_interrupt(__u16 code)
 {
-        unsigned long bits;
+	unsigned long bits;
 
-        /*
-         * handle bit signal external calls
-         *
-         * For the ec_schedule signal we have to do nothing. All the work
-         * is done automatically when we return from the interrupt.
-         */
-        bits = xchg(&S390_lowcore.ext_call_fast, 0);
+	/*
+	 * handle bit signal external calls
+	 *
+	 * For the ec_schedule signal we have to do nothing. All the work
+	 * is done automatically when we return from the interrupt.
+	 */
+	bits = xchg(&S390_lowcore.ext_call_fast, 0);
 
-        if (test_bit(ec_call_function, &bits))
-                do_call_function();
+	if (test_bit(ec_call_function, &bits))
+		do_call_function();
 }
 
@@ -313,11 +315,11 @@ static void do_ext_call_interrupt(__u16 code)
  */
 static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
 {
-        /*
-         * Set signaling bit in lowcore of target cpu and kick it
-         */
-        set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
-        while(signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
+	/*
+	 * Set signaling bit in lowcore of target cpu and kick it
+	 */
+	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
+	while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
 		udelay(10);
 }
 
@@ -332,7 +334,7 @@ void smp_ptlb_callback(void *info)
 
 void smp_ptlb_all(void)
 {
-        on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
+	on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
 }
 EXPORT_SYMBOL(smp_ptlb_all);
 #endif /* ! CONFIG_64BIT */
@@ -344,7 +346,7 @@ EXPORT_SYMBOL(smp_ptlb_all);
  */
 void smp_send_reschedule(int cpu)
 {
-        smp_ext_bitcall(cpu, ec_schedule);
+	smp_ext_bitcall(cpu, ec_schedule);
 }
 
 /*
@@ -358,11 +360,12 @@ struct ec_creg_mask_parms {
 /*
  * callback for setting/clearing control bits
  */
-static void smp_ctl_bit_callback(void *info) {
-        struct ec_creg_mask_parms *pp = info;
-        unsigned long cregs[16];
-        int i;
+static void smp_ctl_bit_callback(void *info)
+{
+	struct ec_creg_mask_parms *pp = info;
+	unsigned long cregs[16];
+	int i;
 
-        __ctl_store(cregs, 0, 15);
+	__ctl_store(cregs, 0, 15);
 	for (i = 0; i <= 15; i++)
 		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
@@ -381,6 +384,7 @@ void smp_ctl_set_bit(int cr, int bit)
 	parms.orvals[cr] = 1 << bit;
 	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
 }
+EXPORT_SYMBOL(smp_ctl_set_bit);
 
 /*
  * Clear a bit in a control register of all cpus
@@ -394,13 +398,72 @@ void smp_ctl_clear_bit(int cr, int bit) | |||
394 | parms.andvals[cr] = ~(1L << bit); | 398 | parms.andvals[cr] = ~(1L << bit); |
395 | on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1); | 399 | on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1); |
396 | } | 400 | } |
401 | EXPORT_SYMBOL(smp_ctl_clear_bit); | ||
402 | |||
403 | #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) | ||
404 | |||
405 | /* | ||
406 | * zfcpdump_prefix_array holds prefix registers for the following scenario: | ||
407 | * 64 bit zfcpdump kernel and 31 bit kernel which is to be dumped. We have to | ||
408 | * save its prefix registers, since they get lost, when switching from 31 bit | ||
409 | * to 64 bit. | ||
410 | */ | ||
411 | unsigned int zfcpdump_prefix_array[NR_CPUS + 1] \ | ||
412 | __attribute__((__section__(".data"))); | ||
413 | |||
414 | static void __init smp_get_save_areas(void) | ||
415 | { | ||
416 | unsigned int cpu, cpu_num, rc; | ||
417 | __u16 boot_cpu_addr; | ||
418 | |||
419 | if (ipl_info.type != IPL_TYPE_FCP_DUMP) | ||
420 | return; | ||
421 | boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr; | ||
422 | cpu_num = 1; | ||
423 | for (cpu = 0; cpu <= 65535; cpu++) { | ||
424 | if ((u16) cpu == boot_cpu_addr) | ||
425 | continue; | ||
426 | __cpu_logical_map[1] = (__u16) cpu; | ||
427 | if (signal_processor(1, sigp_sense) == sigp_not_operational) | ||
428 | continue; | ||
429 | if (cpu_num >= NR_CPUS) { | ||
430 | printk("WARNING: Registers for cpu %i are not " | ||
431 | "saved, since dump kernel was compiled with" | ||
432 | "NR_CPUS=%i!\n", cpu_num, NR_CPUS); | ||
433 | continue; | ||
434 | } | ||
435 | zfcpdump_save_areas[cpu_num] = | ||
436 | alloc_bootmem(sizeof(union save_area)); | ||
437 | while (1) { | ||
438 | rc = signal_processor(1, sigp_stop_and_store_status); | ||
439 | if (rc != sigp_busy) | ||
440 | break; | ||
441 | cpu_relax(); | ||
442 | } | ||
443 | memcpy(zfcpdump_save_areas[cpu_num], | ||
444 | (void *)(unsigned long) store_prefix() + | ||
445 | SAVE_AREA_BASE, SAVE_AREA_SIZE); | ||
446 | #ifdef __s390x__ | ||
447 | /* copy original prefix register */ | ||
448 | zfcpdump_save_areas[cpu_num]->s390x.pref_reg = | ||
449 | zfcpdump_prefix_array[cpu_num]; | ||
450 | #endif | ||
451 | cpu_num++; | ||
452 | } | ||
453 | } | ||
454 | |||
455 | union save_area *zfcpdump_save_areas[NR_CPUS + 1]; | ||
456 | EXPORT_SYMBOL_GPL(zfcpdump_save_areas); | ||
457 | |||
458 | #else | ||
459 | #define smp_get_save_areas() do { } while (0) | ||
460 | #endif | ||
397 | 461 | ||
398 | /* | 462 | /* |
399 | * Lets check how many CPUs we have. | 463 | * Lets check how many CPUs we have. |
400 | */ | 464 | */ |
401 | 465 | ||
402 | static unsigned int | 466 | static unsigned int __init smp_count_cpus(void) |
403 | __init smp_count_cpus(void) | ||
404 | { | 467 | { |
405 | unsigned int cpu, num_cpus; | 468 | unsigned int cpu, num_cpus; |
406 | __u16 boot_cpu_addr; | 469 | __u16 boot_cpu_addr; |
@@ -416,31 +479,30 @@ __init smp_count_cpus(void)
 		if ((__u16) cpu == boot_cpu_addr)
 			continue;
 		__cpu_logical_map[1] = (__u16) cpu;
-		if (signal_processor(1, sigp_sense) ==
-		    sigp_not_operational)
+		if (signal_processor(1, sigp_sense) == sigp_not_operational)
 			continue;
 		num_cpus++;
 	}
 
-	printk("Detected %d CPU's\n",(int) num_cpus);
+	printk("Detected %d CPU's\n", (int) num_cpus);
 	printk("Boot cpu address %2X\n", boot_cpu_addr);
 
 	return num_cpus;
 }
 
 /*
  * Activate a secondary processor.
  */
 int __devinit start_secondary(void *cpuvoid)
 {
 	/* Setup the cpu */
 	cpu_init();
 	preempt_disable();
 	/* Enable TOD clock interrupts on the secondary cpu. */
 	init_cpu_timer();
 #ifdef CONFIG_VIRT_TIMER
 	/* Enable cpu timer interrupts on the secondary cpu. */
 	init_cpu_vtimer();
 #endif
 	/* Enable pfault pseudo page faults on this cpu. */
 	pfault_init();
@@ -449,11 +511,11 @@ int __devinit start_secondary(void *cpuvoid)
 	cpu_set(smp_processor_id(), cpu_online_map);
 	/* Switch on interrupts */
 	local_irq_enable();
-        /* Print info about this processor */
-        print_cpu_info(&S390_lowcore.cpu_data);
-        /* cpu_idle will call schedule for us */
-        cpu_idle();
-        return 0;
+	/* Print info about this processor */
+	print_cpu_info(&S390_lowcore.cpu_data);
+	/* cpu_idle will call schedule for us */
+	cpu_idle();
+	return 0;
 }
 
 static void __init smp_create_idle(unsigned int cpu)
@@ -470,56 +532,13 @@ static void __init smp_create_idle(unsigned int cpu)
 	current_set[cpu] = p;
 }
 
-/* Reserving and releasing of CPUs */
-
-static DEFINE_SPINLOCK(smp_reserve_lock);
-static int smp_cpu_reserved[NR_CPUS];
-
-int
-smp_get_cpu(cpumask_t cpu_mask)
-{
-	unsigned long flags;
-	int cpu;
-
-	spin_lock_irqsave(&smp_reserve_lock, flags);
-	/* Try to find an already reserved cpu. */
-	for_each_cpu_mask(cpu, cpu_mask) {
-		if (smp_cpu_reserved[cpu] != 0) {
-			smp_cpu_reserved[cpu]++;
-			/* Found one. */
-			goto out;
-		}
-	}
-	/* Reserve a new cpu from cpu_mask. */
-	for_each_cpu_mask(cpu, cpu_mask) {
-		if (cpu_online(cpu)) {
-			smp_cpu_reserved[cpu]++;
-			goto out;
-		}
-	}
-	cpu = -ENODEV;
-out:
-	spin_unlock_irqrestore(&smp_reserve_lock, flags);
-	return cpu;
-}
-
-void
-smp_put_cpu(int cpu)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&smp_reserve_lock, flags);
-	smp_cpu_reserved[cpu]--;
-	spin_unlock_irqrestore(&smp_reserve_lock, flags);
-}
-
-static int
-cpu_stopped(int cpu)
+static int cpu_stopped(int cpu)
 {
 	__u32 status;
 
 	/* Check for stopped state */
-	if (signal_processor_ps(&status, 0, cpu, sigp_sense) == sigp_status_stored) {
+	if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
+	    sigp_status_stored) {
 		if (status & 0x40)
 			return 1;
 	}
@@ -528,14 +547,13 @@ cpu_stopped(int cpu)
 
 /* Upping and downing of CPUs */
 
-int
-__cpu_up(unsigned int cpu)
+int __cpu_up(unsigned int cpu)
 {
 	struct task_struct *idle;
 	struct _lowcore *cpu_lowcore;
 	struct stack_frame *sf;
 	sigp_ccode ccode;
 	int curr_cpu;
 
 	for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
 		__cpu_logical_map[cpu] = (__u16) curr_cpu;
@@ -548,7 +566,7 @@ __cpu_up(unsigned int cpu)
 
 	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
 				   cpu, sigp_set_prefix);
-	if (ccode){
+	if (ccode) {
 		printk("sigp_set_prefix failed for cpu %d "
 		       "with condition code %d\n",
 		       (int) cpu, (int) ccode);
@@ -556,9 +574,9 @@ __cpu_up(unsigned int cpu)
 	}
 
 	idle = current_set[cpu];
 	cpu_lowcore = lowcore_ptr[cpu];
 	cpu_lowcore->kernel_stack = (unsigned long)
-		task_stack_page(idle) + (THREAD_SIZE);
+		task_stack_page(idle) + THREAD_SIZE;
 	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
 				     - sizeof(struct pt_regs)
 				     - sizeof(struct stack_frame));
@@ -570,11 +588,11 @@ __cpu_up(unsigned int cpu)
 		"	stam	0,15,0(%0)"
 		: : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
 	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
 	cpu_lowcore->current_task = (unsigned long) idle;
 	cpu_lowcore->cpu_data.cpu_nr = cpu;
 	eieio();
 
-	while (signal_processor(cpu,sigp_restart) == sigp_busy)
+	while (signal_processor(cpu, sigp_restart) == sigp_busy)
 		udelay(10);
 
 	while (!cpu_online(cpu))
@@ -589,6 +607,7 @@ void __init smp_setup_cpu_possible_map(void)
 {
 	unsigned int phy_cpus, pos_cpus, cpu;
 
+	smp_get_save_areas();
 	phy_cpus = smp_count_cpus();
 	pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS);
 
@@ -620,18 +639,11 @@ static int __init setup_possible_cpus(char *s)
 }
 early_param("possible_cpus", setup_possible_cpus);
 
-int
-__cpu_disable(void)
+int __cpu_disable(void)
 {
-	unsigned long flags;
 	struct ec_creg_mask_parms cr_parms;
 	int cpu = smp_processor_id();
 
-	spin_lock_irqsave(&smp_reserve_lock, flags);
-	if (smp_cpu_reserved[cpu] != 0) {
-		spin_unlock_irqrestore(&smp_reserve_lock, flags);
-		return -EBUSY;
-	}
 	cpu_clear(cpu, cpu_online_map);
 
 	/* Disable pfault pseudo page faults on this cpu. */
@@ -642,24 +654,23 @@ __cpu_disable(void)
 
 	/* disable all external interrupts */
 	cr_parms.orvals[0] = 0;
-	cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 |
-				1<<11 | 1<<10 | 1<< 6 | 1<< 4);
+	cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 |
+				1 << 11 | 1 << 10 | 1 << 6 | 1 << 4);
 	/* disable all I/O interrupts */
 	cr_parms.orvals[6] = 0;
-	cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 |
-				1<<27 | 1<<26 | 1<<25 | 1<<24);
+	cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
+				1 << 27 | 1 << 26 | 1 << 25 | 1 << 24);
 	/* disable most machine checks */
 	cr_parms.orvals[14] = 0;
-	cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24);
+	cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 |
+				 1 << 25 | 1 << 24);
 
 	smp_ctl_bit_callback(&cr_parms);
 
-	spin_unlock_irqrestore(&smp_reserve_lock, flags);
 	return 0;
 }
 
-void
-__cpu_die(unsigned int cpu)
+void __cpu_die(unsigned int cpu)
 {
 	/* Wait until target cpu is down */
 	while (!smp_cpu_not_running(cpu))
@@ -667,13 +678,12 @@ __cpu_die(unsigned int cpu)
 	printk("Processor %d spun down\n", cpu);
 }
 
-void
-cpu_die(void)
+void cpu_die(void)
 {
 	idle_task_exit();
 	signal_processor(smp_processor_id(), sigp_stop);
 	BUG();
-	for(;;);
+	for (;;);
 }
 
 #endif /* CONFIG_HOTPLUG_CPU */
@@ -686,36 +696,36 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 {
 	unsigned long stack;
 	unsigned int cpu;
 	int i;
 
 	/* request the 0x1201 emergency signal external interrupt */
 	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
 		panic("Couldn't request external interrupt 0x1201");
-	memset(lowcore_ptr,0,sizeof(lowcore_ptr));
+	memset(lowcore_ptr, 0, sizeof(lowcore_ptr));
 	/*
 	 * Initialize prefix pages and stacks for all possible cpus
 	 */
 	print_cpu_info(&S390_lowcore.cpu_data);
 
 	for_each_possible_cpu(i) {
 		lowcore_ptr[i] = (struct _lowcore *)
-			__get_free_pages(GFP_KERNEL|GFP_DMA,
+			__get_free_pages(GFP_KERNEL | GFP_DMA,
 					 sizeof(void*) == 8 ? 1 : 0);
-		stack = __get_free_pages(GFP_KERNEL,ASYNC_ORDER);
-		if (lowcore_ptr[i] == NULL || stack == 0ULL)
+		stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
+		if (!lowcore_ptr[i] || !stack)
 			panic("smp_boot_cpus failed to allocate memory\n");
 
 		*(lowcore_ptr[i]) = S390_lowcore;
-		lowcore_ptr[i]->async_stack = stack + (ASYNC_SIZE);
-		stack = __get_free_pages(GFP_KERNEL,0);
-		if (stack == 0ULL)
+		lowcore_ptr[i]->async_stack = stack + ASYNC_SIZE;
+		stack = __get_free_pages(GFP_KERNEL, 0);
+		if (!stack)
 			panic("smp_boot_cpus failed to allocate memory\n");
-		lowcore_ptr[i]->panic_stack = stack + (PAGE_SIZE);
+		lowcore_ptr[i]->panic_stack = stack + PAGE_SIZE;
 #ifndef CONFIG_64BIT
 		if (MACHINE_HAS_IEEE) {
 			lowcore_ptr[i]->extended_save_area_addr =
-				(__u32) __get_free_pages(GFP_KERNEL,0);
-			if (lowcore_ptr[i]->extended_save_area_addr == 0)
+				(__u32) __get_free_pages(GFP_KERNEL, 0);
+			if (!lowcore_ptr[i]->extended_save_area_addr)
 				panic("smp_boot_cpus failed to "
 				      "allocate memory\n");
 		}
@@ -754,34 +764,63 @@ void smp_cpus_done(unsigned int max_cpus)
  */
 int setup_profiling_timer(unsigned int multiplier)
 {
 	return 0;
 }
 
 static DEFINE_PER_CPU(struct cpu, cpu_devices);
 
+static ssize_t show_capability(struct sys_device *dev, char *buf)
+{
+	unsigned int capability;
+	int rc;
+
+	rc = get_cpu_capability(&capability);
+	if (rc)
+		return rc;
+	return sprintf(buf, "%u\n", capability);
+}
+static SYSDEV_ATTR(capability, 0444, show_capability, NULL);
+
+static int __cpuinit smp_cpu_notify(struct notifier_block *self,
+				    unsigned long action, void *hcpu)
+{
+	unsigned int cpu = (unsigned int)(long)hcpu;
+	struct cpu *c = &per_cpu(cpu_devices, cpu);
+	struct sys_device *s = &c->sysdev;
+
+	switch (action) {
+	case CPU_ONLINE:
+		if (sysdev_create_file(s, &attr_capability))
+			return NOTIFY_BAD;
+		break;
+	case CPU_DEAD:
+		sysdev_remove_file(s, &attr_capability);
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata smp_cpu_nb = {
+	.notifier_call = smp_cpu_notify,
+};
+
 static int __init topology_init(void)
 {
 	int cpu;
-	int ret;
+
+	register_cpu_notifier(&smp_cpu_nb);
 
 	for_each_possible_cpu(cpu) {
 		struct cpu *c = &per_cpu(cpu_devices, cpu);
+		struct sys_device *s = &c->sysdev;
 
 		c->hotpluggable = 1;
-		ret = register_cpu(c, cpu);
-		if (ret)
-			printk(KERN_WARNING "topology_init: register_cpu %d "
-			       "failed (%d)\n", cpu, ret);
+		register_cpu(c, cpu);
+		if (!cpu_online(cpu))
+			continue;
+		s = &c->sysdev;
+		sysdev_create_file(s, &attr_capability);
 	}
 	return 0;
 }
-
 subsys_initcall(topology_init);
-
-EXPORT_SYMBOL(cpu_online_map);
-EXPORT_SYMBOL(cpu_possible_map);
-EXPORT_SYMBOL(lowcore_ptr);
-EXPORT_SYMBOL(smp_ctl_set_bit);
-EXPORT_SYMBOL(smp_ctl_clear_bit);
-EXPORT_SYMBOL(smp_get_cpu);
-EXPORT_SYMBOL(smp_put_cpu);