author     Heiko Carstens <heiko.carstens@de.ibm.com>  2007-04-27 10:02:00 -0400
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>  2007-04-27 10:01:47 -0400
commit     39ce010d38bf6703b49f59eb73bef030b1d659f2 (patch)
tree       76a0ca6ba8289644def45c30d214dd7d8b2921a4 /arch/s390/kernel
parent     9ff6f4577e69801a43c0d58606a80040aecbc4bc (diff)
[S390] Clean up smp code in preparation for some larger changes.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Diffstat (limited to 'arch/s390/kernel')
-rw-r--r--  arch/s390/kernel/smp.c | 261
1 file changed, 100 insertions(+), 161 deletions(-)
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 2c5de92958dd..3754e2031b39 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -1,12 +1,12 @@
 /*
  * arch/s390/kernel/smp.c
  *
- * Copyright (C) IBM Corp. 1999,2006
+ * Copyright IBM Corp. 1999,2007
  * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
  *            Martin Schwidefsky (schwidefsky@de.ibm.com)
  *            Heiko Carstens (heiko.carstens@de.ibm.com)
  *
  * based on other smp stuff by
  * (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
  * (c) 1998 Ingo Molnar
  *
@@ -43,16 +43,17 @@
 #include <asm/timer.h>
 #include <asm/lowcore.h>
 
-extern volatile int __cpu_logical_map[];
-
 /*
  * An array with a pointer the lowcore of every CPU.
  */
-
 struct _lowcore *lowcore_ptr[NR_CPUS];
+EXPORT_SYMBOL(lowcore_ptr);
 
 cpumask_t cpu_online_map = CPU_MASK_NONE;
+EXPORT_SYMBOL(cpu_online_map);
+
 cpumask_t cpu_possible_map = CPU_MASK_NONE;
+EXPORT_SYMBOL(cpu_possible_map);
 
 static struct task_struct *current_set[NR_CPUS];
 
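
The hunk above also moves each EXPORT_SYMBOL() next to the definition it exports (they previously sat in a block at the end of the file, removed in the last hunk below). A minimal sketch of that convention, with illustrative names rather than symbols from this file:

#include <linux/module.h>

/* Keeping the export directly under the definition makes it obvious
 * which symbols are part of the kernel-wide API. */
int example_counter;
EXPORT_SYMBOL(example_counter);

int example_increment(void)
{
	return ++example_counter;
}
EXPORT_SYMBOL(example_increment);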
@@ -72,7 +73,7 @@ struct call_data_struct {
 	int wait;
 };
 
-static struct call_data_struct * call_data;
+static struct call_data_struct *call_data;
 
 /*
  * 'Call function' interrupt callback
@@ -152,8 +153,8 @@ out:
  *
  * Run a function on all other CPUs.
  *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler. You may call it from a bottom half.
+ * You must not call this function with disabled interrupts, from a
+ * hardware interrupt handler or from a bottom half.
  */
 int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
 		      int wait)
@@ -179,11 +180,11 @@ EXPORT_SYMBOL(smp_call_function);
  *
  * Run a function on one processor.
  *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler. You may call it from a bottom half.
+ * You must not call this function with disabled interrupts, from a
+ * hardware interrupt handler or from a bottom half.
  */
 int smp_call_function_on(void (*func) (void *info), void *info, int nonatomic,
 			 int wait, int cpu)
 {
 	cpumask_t map = CPU_MASK_NONE;
 
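
The reworded comment tightens the calling rules: unlike the old text, it also rules out calling from a bottom half. A hedged usage sketch against the four-argument API shown above (flush_example() is a hypothetical callback, not from this file):

static void flush_example(void *info)
{
	/* Runs on every other online cpu; must be short and non-blocking. */
	(void) info;
}

static void trigger_flush(void)
{
	/* nonatomic = 0, wait = 1: return only after all cpus ran it. */
	smp_call_function(flush_example, NULL, 0, 1);
}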
@@ -197,9 +198,9 @@ EXPORT_SYMBOL(smp_call_function_on);
 
 static void do_send_stop(void)
 {
-        int cpu, rc;
+	int cpu, rc;
 
-        /* stop all processors */
-        for_each_online_cpu(cpu) {
-                if (cpu == smp_processor_id())
-                        continue;
+	/* stop all processors */
+	for_each_online_cpu(cpu) {
+		if (cpu == smp_processor_id())
+			continue;
@@ -211,9 +212,9 @@ static void do_send_stop(void)
 
 static void do_store_status(void)
 {
-        int cpu, rc;
+	int cpu, rc;
 
-        /* store status of all processors in their lowcores (real 0) */
-        for_each_online_cpu(cpu) {
-                if (cpu == smp_processor_id())
-                        continue;
+	/* store status of all processors in their lowcores (real 0) */
+	for_each_online_cpu(cpu) {
+		if (cpu == smp_processor_id())
+			continue;
@@ -221,8 +222,8 @@ static void do_store_status(void)
 			rc = signal_processor_p(
 				(__u32)(unsigned long) lowcore_ptr[cpu], cpu,
 				sigp_store_status_at_address);
-		} while(rc == sigp_busy);
+		} while (rc == sigp_busy);
 	}
 }
 
 static void do_wait_for_stop(void)
@@ -233,7 +234,7 @@ static void do_wait_for_stop(void)
 	for_each_online_cpu(cpu) {
 		if (cpu == smp_processor_id())
 			continue;
-		while(!smp_cpu_not_running(cpu))
+		while (!smp_cpu_not_running(cpu))
 			cpu_relax();
 	}
 }
@@ -247,7 +248,7 @@ void smp_send_stop(void)
-        /* Disable all interrupts/machine checks */
-        __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);
+	/* Disable all interrupts/machine checks */
+	__load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);
 
-        /* write magic number to zero page (absolute 0) */
-        lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;
+	/* write magic number to zero page (absolute 0) */
+	lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;
 
-        /* stop other processors. */
+	/* stop other processors. */
@@ -263,8 +264,7 @@ void smp_send_stop(void)
 /*
  * Reboot, halt and power_off routines for SMP.
  */
-
-void machine_restart_smp(char * __unused)
+void machine_restart_smp(char *__unused)
 {
 	smp_send_stop();
 	do_reipl();
@@ -295,17 +295,17 @@ void machine_power_off_smp(void)
 
 static void do_ext_call_interrupt(__u16 code)
 {
-        unsigned long bits;
+	unsigned long bits;
 
-        /*
-         * handle bit signal external calls
-         *
-         * For the ec_schedule signal we have to do nothing. All the work
-         * is done automatically when we return from the interrupt.
-         */
-        bits = xchg(&S390_lowcore.ext_call_fast, 0);
+	/*
+	 * handle bit signal external calls
+	 *
+	 * For the ec_schedule signal we have to do nothing. All the work
+	 * is done automatically when we return from the interrupt.
+	 */
+	bits = xchg(&S390_lowcore.ext_call_fast, 0);
 
-        if (test_bit(ec_call_function, &bits))
-                do_call_function();
+	if (test_bit(ec_call_function, &bits))
+		do_call_function();
 }
 
@@ -315,11 +315,11 @@ static void do_ext_call_interrupt(__u16 code)
315 */ 315 */
316static void smp_ext_bitcall(int cpu, ec_bit_sig sig) 316static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
317{ 317{
318 /* 318 /*
319 * Set signaling bit in lowcore of target cpu and kick it 319 * Set signaling bit in lowcore of target cpu and kick it
320 */ 320 */
321 set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast); 321 set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
322 while(signal_processor(cpu, sigp_emergency_signal) == sigp_busy) 322 while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
323 udelay(10); 323 udelay(10);
324} 324}
325 325
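smp_ext_bitcall() is the sender side of the external-call mechanism handled by do_ext_call_interrupt() above: publish a request bit in the target cpu's lowcore, then deliver the emergency-signal order, retrying while the processor reports busy. The retry idiom, factored into a hypothetical helper (sigp_retry() does not exist in this file):

static void sigp_retry(int cpu, int order)
{
	/* sigp_busy just means "try again"; any other condition code
	 * (accepted, status stored, not operational) ends the loop. */
	while (signal_processor(cpu, order) == sigp_busy)
		udelay(10);
}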
@@ -334,7 +334,7 @@ void smp_ptlb_callback(void *info)
 
 void smp_ptlb_all(void)
 {
-        on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
+	on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
 }
 EXPORT_SYMBOL(smp_ptlb_all);
 #endif /* ! CONFIG_64BIT */
@@ -346,7 +346,7 @@ EXPORT_SYMBOL(smp_ptlb_all);
346 */ 346 */
347void smp_send_reschedule(int cpu) 347void smp_send_reschedule(int cpu)
348{ 348{
349 smp_ext_bitcall(cpu, ec_schedule); 349 smp_ext_bitcall(cpu, ec_schedule);
350} 350}
351 351
352/* 352/*
@@ -360,11 +360,12 @@ struct ec_creg_mask_parms {
 /*
  * callback for setting/clearing control bits
  */
-static void smp_ctl_bit_callback(void *info) {
+static void smp_ctl_bit_callback(void *info)
+{
 	struct ec_creg_mask_parms *pp = info;
 	unsigned long cregs[16];
 	int i;
 
 	__ctl_store(cregs, 0, 15);
 	for (i = 0; i <= 15; i++)
 		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
@@ -383,6 +384,7 @@ void smp_ctl_set_bit(int cr, int bit)
 	parms.orvals[cr] = 1 << bit;
 	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
 }
+EXPORT_SYMBOL(smp_ctl_set_bit);
 
 /*
  * Clear a bit in a control register of all cpus
@@ -396,6 +398,7 @@ void smp_ctl_clear_bit(int cr, int bit)
 	parms.andvals[cr] = ~(1L << bit);
 	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
 }
+EXPORT_SYMBOL(smp_ctl_clear_bit);
 
 #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
 
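Both helpers funnel into smp_ctl_bit_callback(), which rewrites all 16 control registers as new = (old & andval) | orval; setting a bit therefore uses andval = ~0UL with orval = 1 << bit, and clearing uses andval = ~(1UL << bit) with orval = 0. A self-contained sketch of that mask identity (plain C, no kernel dependencies):

#include <stdio.h>

static unsigned long apply_masks(unsigned long reg,
				 unsigned long andval, unsigned long orval)
{
	/* Clear the bits outside andval, then force the bits in orval on. */
	return (reg & andval) | orval;
}

int main(void)
{
	unsigned long cr = 0x10;		/* bit 4 set */

	cr = apply_masks(cr, ~0UL, 1UL << 2);	/* set bit 2 */
	cr = apply_masks(cr, ~(1UL << 4), 0);	/* clear bit 4 */
	printf("%#lx\n", cr);			/* prints 0x4 */
	return 0;
}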
@@ -460,8 +463,7 @@ EXPORT_SYMBOL_GPL(zfcpdump_save_areas);
  * Lets check how many CPUs we have.
  */
 
-static unsigned int
-__init smp_count_cpus(void)
+static unsigned int __init smp_count_cpus(void)
 {
 	unsigned int cpu, num_cpus;
 	__u16 boot_cpu_addr;
@@ -477,31 +479,30 @@ __init smp_count_cpus(void)
 		if ((__u16) cpu == boot_cpu_addr)
 			continue;
 		__cpu_logical_map[1] = (__u16) cpu;
-		if (signal_processor(1, sigp_sense) ==
-		    sigp_not_operational)
+		if (signal_processor(1, sigp_sense) == sigp_not_operational)
 			continue;
 		num_cpus++;
 	}
 
-	printk("Detected %d CPU's\n",(int) num_cpus);
+	printk("Detected %d CPU's\n", (int) num_cpus);
 	printk("Boot cpu address %2X\n", boot_cpu_addr);
 
 	return num_cpus;
 }
 
 /*
  * Activate a secondary processor.
  */
 int __devinit start_secondary(void *cpuvoid)
 {
 	/* Setup the cpu */
 	cpu_init();
 	preempt_disable();
 	/* Enable TOD clock interrupts on the secondary cpu. */
 	init_cpu_timer();
 #ifdef CONFIG_VIRT_TIMER
 	/* Enable cpu timer interrupts on the secondary cpu. */
 	init_cpu_vtimer();
 #endif
 	/* Enable pfault pseudo page faults on this cpu. */
 	pfault_init();
@@ -510,11 +511,11 @@ int __devinit start_secondary(void *cpuvoid)
 	cpu_set(smp_processor_id(), cpu_online_map);
-        /* Switch on interrupts */
-        local_irq_enable();
+	/* Switch on interrupts */
+	local_irq_enable();
 	/* Print info about this processor */
 	print_cpu_info(&S390_lowcore.cpu_data);
 	/* cpu_idle will call schedule for us */
 	cpu_idle();
 	return 0;
 }
 
 static void __init smp_create_idle(unsigned int cpu)
@@ -531,56 +532,13 @@ static void __init smp_create_idle(unsigned int cpu)
 	current_set[cpu] = p;
 }
 
-/* Reserving and releasing of CPUs */
-
-static DEFINE_SPINLOCK(smp_reserve_lock);
-static int smp_cpu_reserved[NR_CPUS];
-
-int
-smp_get_cpu(cpumask_t cpu_mask)
-{
-	unsigned long flags;
-	int cpu;
-
-	spin_lock_irqsave(&smp_reserve_lock, flags);
-	/* Try to find an already reserved cpu. */
-	for_each_cpu_mask(cpu, cpu_mask) {
-		if (smp_cpu_reserved[cpu] != 0) {
-			smp_cpu_reserved[cpu]++;
-			/* Found one. */
-			goto out;
-		}
-	}
-	/* Reserve a new cpu from cpu_mask. */
-	for_each_cpu_mask(cpu, cpu_mask) {
-		if (cpu_online(cpu)) {
-			smp_cpu_reserved[cpu]++;
-			goto out;
-		}
-	}
-	cpu = -ENODEV;
-out:
-	spin_unlock_irqrestore(&smp_reserve_lock, flags);
-	return cpu;
-}
-
-void
-smp_put_cpu(int cpu)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&smp_reserve_lock, flags);
-	smp_cpu_reserved[cpu]--;
-	spin_unlock_irqrestore(&smp_reserve_lock, flags);
-}
-
-static int
-cpu_stopped(int cpu)
+static int cpu_stopped(int cpu)
 {
 	__u32 status;
 
 	/* Check for stopped state */
-	if (signal_processor_ps(&status, 0, cpu, sigp_sense) == sigp_status_stored) {
+	if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
+	    sigp_status_stored) {
 		if (status & 0x40)
 			return 1;
 	}
@@ -589,14 +547,13 @@ cpu_stopped(int cpu)
 
 /* Upping and downing of CPUs */
 
-int
-__cpu_up(unsigned int cpu)
+int __cpu_up(unsigned int cpu)
 {
 	struct task_struct *idle;
 	struct _lowcore *cpu_lowcore;
 	struct stack_frame *sf;
 	sigp_ccode ccode;
 	int curr_cpu;
 
 	for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
 		__cpu_logical_map[cpu] = (__u16) curr_cpu;
@@ -609,7 +566,7 @@ __cpu_up(unsigned int cpu)
 
 	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
 				   cpu, sigp_set_prefix);
-	if (ccode){
+	if (ccode) {
 		printk("sigp_set_prefix failed for cpu %d "
 		       "with condition code %d\n",
 		       (int) cpu, (int) ccode);
@@ -617,9 +574,9 @@ __cpu_up(unsigned int cpu)
 	}
 
 	idle = current_set[cpu];
 	cpu_lowcore = lowcore_ptr[cpu];
 	cpu_lowcore->kernel_stack = (unsigned long)
-		task_stack_page(idle) + (THREAD_SIZE);
+		task_stack_page(idle) + THREAD_SIZE;
 	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
 				     - sizeof(struct pt_regs)
 				     - sizeof(struct stack_frame));
@@ -631,11 +588,11 @@ __cpu_up(unsigned int cpu)
 		"	stam	0,15,0(%0)"
 		: : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
 	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
 	cpu_lowcore->current_task = (unsigned long) idle;
 	cpu_lowcore->cpu_data.cpu_nr = cpu;
 	eieio();
 
-	while (signal_processor(cpu,sigp_restart) == sigp_busy)
+	while (signal_processor(cpu, sigp_restart) == sigp_busy)
 		udelay(10);
 
 	while (!cpu_online(cpu))
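
The stack wiring above places the new cpu's first frame just below the top of the idle task's stack: stacks grow downward, so the frame sits at kernel_stack minus sizeof(struct pt_regs) minus sizeof(struct stack_frame). The arithmetic as a standalone sketch (the struct layouts and THREAD_SIZE value here are stand-ins, not the real s390 definitions):

#include <stdio.h>

struct pt_regs     { unsigned long gprs[16]; };	/* stand-in */
struct stack_frame { unsigned long back_chain; unsigned long gprs[9]; };

#define THREAD_SIZE 8192UL	/* stand-in value */

int main(void)
{
	unsigned long stack_page = 0x100000UL;	/* pretend allocation */
	unsigned long top = stack_page + THREAD_SIZE;
	unsigned long sf = top - sizeof(struct pt_regs)
			       - sizeof(struct stack_frame);

	printf("first frame at %#lx, %zu bytes below the stack top\n",
	       sf, sizeof(struct pt_regs) + sizeof(struct stack_frame));
	return 0;
}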
@@ -682,18 +639,11 @@ static int __init setup_possible_cpus(char *s)
 }
 early_param("possible_cpus", setup_possible_cpus);
 
-int
-__cpu_disable(void)
+int __cpu_disable(void)
 {
-	unsigned long flags;
 	struct ec_creg_mask_parms cr_parms;
 	int cpu = smp_processor_id();
 
-	spin_lock_irqsave(&smp_reserve_lock, flags);
-	if (smp_cpu_reserved[cpu] != 0) {
-		spin_unlock_irqrestore(&smp_reserve_lock, flags);
-		return -EBUSY;
-	}
 	cpu_clear(cpu, cpu_online_map);
 
 	/* Disable pfault pseudo page faults on this cpu. */
@@ -704,24 +654,23 @@ __cpu_disable(void)
 
 	/* disable all external interrupts */
 	cr_parms.orvals[0] = 0;
-	cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 |
-				1<<11 | 1<<10 | 1<< 6 | 1<< 4);
+	cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 |
+				1 << 11 | 1 << 10 | 1 << 6 | 1 << 4);
 	/* disable all I/O interrupts */
 	cr_parms.orvals[6] = 0;
-	cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 |
-				1<<27 | 1<<26 | 1<<25 | 1<<24);
+	cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
+				1 << 27 | 1 << 26 | 1 << 25 | 1 << 24);
 	/* disable most machine checks */
 	cr_parms.orvals[14] = 0;
-	cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24);
+	cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 |
+				 1 << 25 | 1 << 24);
 
 	smp_ctl_bit_callback(&cr_parms);
 
-	spin_unlock_irqrestore(&smp_reserve_lock, flags);
 	return 0;
 }
 
-void
-__cpu_die(unsigned int cpu)
+void __cpu_die(unsigned int cpu)
 {
 	/* Wait until target cpu is down */
 	while (!smp_cpu_not_running(cpu))
@@ -729,13 +678,12 @@ __cpu_die(unsigned int cpu)
 	printk("Processor %d spun down\n", cpu);
 }
 
-void
-cpu_die(void)
+void cpu_die(void)
 {
 	idle_task_exit();
 	signal_processor(smp_processor_id(), sigp_stop);
 	BUG();
-	for(;;);
+	for (;;);
 }
 
 #endif /* CONFIG_HOTPLUG_CPU */
@@ -748,36 +696,36 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 {
 	unsigned long stack;
 	unsigned int cpu;
 	int i;
 
 	/* request the 0x1201 emergency signal external interrupt */
 	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
 		panic("Couldn't request external interrupt 0x1201");
-	memset(lowcore_ptr,0,sizeof(lowcore_ptr));
+	memset(lowcore_ptr, 0, sizeof(lowcore_ptr));
 	/*
 	 * Initialize prefix pages and stacks for all possible cpus
 	 */
 	print_cpu_info(&S390_lowcore.cpu_data);
 
 	for_each_possible_cpu(i) {
 		lowcore_ptr[i] = (struct _lowcore *)
-			__get_free_pages(GFP_KERNEL|GFP_DMA,
+			__get_free_pages(GFP_KERNEL | GFP_DMA,
 					 sizeof(void*) == 8 ? 1 : 0);
-		stack = __get_free_pages(GFP_KERNEL,ASYNC_ORDER);
-		if (lowcore_ptr[i] == NULL || stack == 0ULL)
+		stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
+		if (!lowcore_ptr[i] || !stack)
 			panic("smp_boot_cpus failed to allocate memory\n");
 
 		*(lowcore_ptr[i]) = S390_lowcore;
-		lowcore_ptr[i]->async_stack = stack + (ASYNC_SIZE);
-		stack = __get_free_pages(GFP_KERNEL,0);
-		if (stack == 0ULL)
+		lowcore_ptr[i]->async_stack = stack + ASYNC_SIZE;
+		stack = __get_free_pages(GFP_KERNEL, 0);
+		if (!stack)
 			panic("smp_boot_cpus failed to allocate memory\n");
-		lowcore_ptr[i]->panic_stack = stack + (PAGE_SIZE);
+		lowcore_ptr[i]->panic_stack = stack + PAGE_SIZE;
 #ifndef CONFIG_64BIT
 		if (MACHINE_HAS_IEEE) {
 			lowcore_ptr[i]->extended_save_area_addr =
-				(__u32) __get_free_pages(GFP_KERNEL,0);
-			if (lowcore_ptr[i]->extended_save_area_addr == 0)
+				(__u32) __get_free_pages(GFP_KERNEL, 0);
+			if (!lowcore_ptr[i]->extended_save_area_addr)
 				panic("smp_boot_cpus failed to "
 				      "allocate memory\n");
 		}
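
The lowcore allocation is sized by allocator order rather than bytes: __get_free_pages(flags, order) hands back 2^order contiguous pages, so `sizeof(void*) == 8 ? 1 : 0` means two pages (8 KB) for 64-bit lowcores and one page (4 KB) for 31-bit ones, assuming the usual 4 KB page size. The order-to-bytes relation as a small sketch:

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumption: 4 KB pages */

static unsigned long order_to_bytes(unsigned int order)
{
	return PAGE_SIZE << order;	/* 2^order contiguous pages */
}

int main(void)
{
	unsigned int order = sizeof(void *) == 8 ? 1 : 0;

	printf("lowcore allocation: %lu bytes (order %u)\n",
	       order_to_bytes(order), order);
	return 0;
}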
@@ -816,7 +764,7 @@ void smp_cpus_done(unsigned int max_cpus)
  */
 int setup_profiling_timer(unsigned int multiplier)
 {
-        return 0;
+	return 0;
 }
 
 static DEFINE_PER_CPU(struct cpu, cpu_devices);
@@ -853,7 +801,7 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
 }
 
 static struct notifier_block __cpuinitdata smp_cpu_nb = {
-        .notifier_call = smp_cpu_notify,
+	.notifier_call = smp_cpu_notify,
 };
 
 static int __init topology_init(void)
@@ -875,13 +823,4 @@ static int __init topology_init(void)
 	}
 	return 0;
 }
-
 subsys_initcall(topology_init);
-
-EXPORT_SYMBOL(cpu_online_map);
-EXPORT_SYMBOL(cpu_possible_map);
-EXPORT_SYMBOL(lowcore_ptr);
-EXPORT_SYMBOL(smp_ctl_set_bit);
-EXPORT_SYMBOL(smp_ctl_clear_bit);
-EXPORT_SYMBOL(smp_get_cpu);
-EXPORT_SYMBOL(smp_put_cpu);