author		David S. Miller <davem@davemloft.net>	2008-12-28 23:19:47 -0500
committer	David S. Miller <davem@davemloft.net>	2008-12-28 23:19:47 -0500
commit		e3c6d4ee545e427b55882d97d3b663c6411645fe (patch)
tree		294326663fb757739a98083c2ddd570d1eaf7337 /arch/s390/kernel/smp.c
parent		5bc053089376217943187ed5153d0d1e5c5085b6 (diff)
parent		3c92ec8ae91ecf59d88c798301833d7cf83f2179 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
	arch/sparc64/kernel/idprom.c
Diffstat (limited to 'arch/s390/kernel/smp.c')
-rw-r--r--	arch/s390/kernel/smp.c	201
1 file changed, 29 insertions(+), 172 deletions(-)
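
For orientation before the hunks: the smp.c side of this merge converts s390 from its private cross-call implementation to the generic smp_call_function infrastructure in kernel/smp.c. The division of labor, sketched as declarations. The generic prototypes below are quoted from memory of the generic code of this kernel era and should be treated as approximate; the two arch hooks are the ones added by the hunks that follow.

	/* Provided by the generic code in kernel/smp.c: */
	int smp_call_function(void (*func)(void *), void *info, int wait);
	int smp_call_function_single(int cpu, void (*func)(void *),
				     void *info, int wait);
	void generic_smp_call_function_interrupt(void);
	void generic_smp_call_function_single_interrupt(void);
	void ipi_call_lock(void);
	void ipi_call_unlock(void);

	/* To be supplied by each architecture (added for s390 below): */
	void arch_send_call_function_ipi(cpumask_t mask);
	void arch_send_call_function_single_ipi(int cpu);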
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index b5595688a477..6fc78541dc57 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -20,6 +20,9 @@
  * cpu_number_map in other architectures.
  */
 
+#define KMSG_COMPONENT "cpu"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/mm.h>
@@ -77,159 +80,6 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices);
 
 static void smp_ext_bitcall(int, ec_bit_sig);
 
-/*
- * Structure and data for __smp_call_function_map(). This is designed to
- * minimise static memory requirements. It also looks cleaner.
- */
-static DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
-	void (*func) (void *info);
-	void *info;
-	cpumask_t started;
-	cpumask_t finished;
-	int wait;
-};
-
-static struct call_data_struct *call_data;
-
-/*
- * 'Call function' interrupt callback
- */
-static void do_call_function(void)
-{
-	void (*func) (void *info) = call_data->func;
-	void *info = call_data->info;
-	int wait = call_data->wait;
-
-	cpu_set(smp_processor_id(), call_data->started);
-	(*func)(info);
-	if (wait)
-		cpu_set(smp_processor_id(), call_data->finished);;
-}
-
-static void __smp_call_function_map(void (*func) (void *info), void *info,
-				    int wait, cpumask_t map)
-{
-	struct call_data_struct data;
-	int cpu, local = 0;
-
-	/*
-	 * Can deadlock when interrupts are disabled or if in wrong context.
-	 */
-	WARN_ON(irqs_disabled() || in_irq());
-
-	/*
-	 * Check for local function call. We have to have the same call order
-	 * as in on_each_cpu() because of machine_restart_smp().
-	 */
-	if (cpu_isset(smp_processor_id(), map)) {
-		local = 1;
-		cpu_clear(smp_processor_id(), map);
-	}
-
-	cpus_and(map, map, cpu_online_map);
-	if (cpus_empty(map))
-		goto out;
-
-	data.func = func;
-	data.info = info;
-	data.started = CPU_MASK_NONE;
-	data.wait = wait;
-	if (wait)
-		data.finished = CPU_MASK_NONE;
-
-	call_data = &data;
-
-	for_each_cpu_mask(cpu, map)
-		smp_ext_bitcall(cpu, ec_call_function);
-
-	/* Wait for response */
-	while (!cpus_equal(map, data.started))
-		cpu_relax();
-	if (wait)
-		while (!cpus_equal(map, data.finished))
-			cpu_relax();
-out:
-	if (local) {
-		local_irq_disable();
-		func(info);
-		local_irq_enable();
-	}
-}
-
-/*
- * smp_call_function:
- * @func: the function to run; this must be fast and non-blocking
- * @info: an arbitrary pointer to pass to the function
- * @wait: if true, wait (atomically) until function has completed on other CPUs
- *
- * Run a function on all other CPUs.
- *
- * You must not call this function with disabled interrupts, from a
- * hardware interrupt handler or from a bottom half.
- */
-int smp_call_function(void (*func) (void *info), void *info, int wait)
-{
-	cpumask_t map;
-
-	spin_lock(&call_lock);
-	map = cpu_online_map;
-	cpu_clear(smp_processor_id(), map);
-	__smp_call_function_map(func, info, wait, map);
-	spin_unlock(&call_lock);
-	return 0;
-}
-EXPORT_SYMBOL(smp_call_function);
-
-/*
- * smp_call_function_single:
- * @cpu: the CPU where func should run
- * @func: the function to run; this must be fast and non-blocking
- * @info: an arbitrary pointer to pass to the function
- * @wait: if true, wait (atomically) until function has completed on other CPUs
- *
- * Run a function on one processor.
- *
- * You must not call this function with disabled interrupts, from a
- * hardware interrupt handler or from a bottom half.
- */
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-			     int wait)
-{
-	spin_lock(&call_lock);
-	__smp_call_function_map(func, info, wait, cpumask_of_cpu(cpu));
-	spin_unlock(&call_lock);
-	return 0;
-}
-EXPORT_SYMBOL(smp_call_function_single);
-
-/**
- * smp_call_function_mask(): Run a function on a set of other CPUs.
- * @mask: The set of cpus to run on. Must not include the current cpu.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
-			   int wait)
-{
-	spin_lock(&call_lock);
-	cpu_clear(smp_processor_id(), mask);
-	__smp_call_function_map(func, info, wait, mask);
-	spin_unlock(&call_lock);
-	return 0;
-}
-EXPORT_SYMBOL(smp_call_function_mask);
-
 void smp_send_stop(void)
 {
 	int cpu, rc;
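
Callers are unaffected by the removal above: smp_call_function() and smp_call_function_single() keep the same prototypes, only their implementation now lives in kernel/smp.c instead of being serialized through the file-local call_lock and the single shared call_data block. A minimal usage sketch; flush_my_state() and trigger_flush() are invented for illustration and appear nowhere in this patch.

	/* Hypothetical example, not from this patch. */
	static void flush_my_state(void *info)
	{
		/* Runs on every other CPU, in interrupt context:
		 * must be fast and must not block. */
	}

	static void trigger_flush(void)
	{
		/* wait=1: return only after all other CPUs ran the handler. */
		smp_call_function(flush_my_state, NULL, 1);
	}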
@@ -271,7 +121,10 @@ static void do_ext_call_interrupt(__u16 code)
 	bits = xchg(&S390_lowcore.ext_call_fast, 0);
 
 	if (test_bit(ec_call_function, &bits))
-		do_call_function();
+		generic_smp_call_function_interrupt();
+
+	if (test_bit(ec_call_function_single, &bits))
+		generic_smp_call_function_single_interrupt();
 }
 
 /*
@@ -288,6 +141,19 @@ static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
 		udelay(10);
 }
 
+void arch_send_call_function_ipi(cpumask_t mask)
+{
+	int cpu;
+
+	for_each_cpu_mask(cpu, mask)
+		smp_ext_bitcall(cpu, ec_call_function);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+	smp_ext_bitcall(cpu, ec_call_function_single);
+}
+
 #ifndef CONFIG_64BIT
 /*
  * this function sends a 'purge tlb' signal to another CPU.
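
The two hooks above are the send side of the contract; the receive side is the earlier hunk, where do_ext_call_interrupt() dispatches each pending bit to the matching generic_smp_call_function*_interrupt() handler. Underneath both sits smp_ext_bitcall(), which this patch leaves alone. Sketched from memory of this era of the file, so treat the details as approximate:

	/* Approximate sketch of the unchanged transport: mark the request
	 * in the target CPU's lowcore, then kick it with a SIGP
	 * emergency-signal order, retrying while the target is busy. */
	static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
	{
		set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
		while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
			udelay(10);
	}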
@@ -388,8 +254,8 @@ static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
 	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
 		return;
 	if (cpu >= NR_CPUS) {
-		printk(KERN_WARNING "Registers for cpu %i not saved since dump "
-		       "kernel was compiled with NR_CPUS=%i\n", cpu, NR_CPUS);
+		pr_warning("CPU %i exceeds the maximum %i and is excluded from "
+			   "the dump\n", cpu, NR_CPUS - 1);
 		return;
 	}
 	zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL);
@@ -562,7 +428,7 @@ static void __init smp_detect_cpus(void)
 	}
 out:
 	kfree(info);
-	printk(KERN_INFO "CPUs: %d configured, %d standby\n", c_cpus, s_cpus);
+	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
 	get_online_cpus();
 	__smp_rescan_cpus();
 	put_online_cpus();
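
The printk-to-pr_* conversions in this hunk and the previous one lean on the KMSG_COMPONENT / pr_fmt() definitions added in the first hunk: pr_fmt() is prepended to the format string at compile time, so every message from this file is tagged with the component name. An illustrative expansion, with invented values:

	pr_info("%d configured CPUs, %d standby CPUs\n", 4, 2);
	/* is roughly equivalent to:
	 *   printk(KERN_INFO "cpu" ": " "%d configured CPUs, %d standby CPUs\n",
	 *          4, 2);
	 * and appears in the log as:
	 *   cpu: 4 configured CPUs, 2 standby CPUs
	 */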
@@ -578,19 +444,17 @@ int __cpuinit start_secondary(void *cpuvoid)
 	preempt_disable();
 	/* Enable TOD clock interrupts on the secondary cpu. */
 	init_cpu_timer();
-#ifdef CONFIG_VIRT_TIMER
 	/* Enable cpu timer interrupts on the secondary cpu. */
 	init_cpu_vtimer();
-#endif
 	/* Enable pfault pseudo page faults on this cpu. */
 	pfault_init();
 
 	/* call cpu notifiers */
 	notify_cpu_starting(smp_processor_id());
 	/* Mark this cpu as online */
-	spin_lock(&call_lock);
+	ipi_call_lock();
 	cpu_set(smp_processor_id(), cpu_online_map);
-	spin_unlock(&call_lock);
+	ipi_call_unlock();
 	/* Switch on interrupts */
 	local_irq_enable();
 	/* Print info about this processor */
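
The file-local call_lock that used to serialize cpu_online_map updates against in-flight cross-calls was deleted in the second hunk, so the bringup path switches to the lock the generic code itself uses, exposed as ipi_call_lock()/ipi_call_unlock(). The point of the pattern, restated with rationale comments that are mine rather than the patch author's:

	ipi_call_lock();	/* exclude concurrent smp_call_function() senders */
	cpu_set(smp_processor_id(), cpu_online_map);
	ipi_call_unlock();	/* senders now see a stable online mask */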
@@ -639,18 +503,15 @@ static int __cpuinit smp_alloc_lowcore(int cpu)
 
 		save_area = get_zeroed_page(GFP_KERNEL);
 		if (!save_area)
-			goto out_save_area;
+			goto out;
 		lowcore->extended_save_area_addr = (u32) save_area;
 	}
 #endif
 	lowcore_ptr[cpu] = lowcore;
 	return 0;
 
-#ifndef CONFIG_64BIT
-out_save_area:
-	free_page(panic_stack);
-#endif
 out:
+	free_page(panic_stack);
 	free_pages(async_stack, ASYNC_ORDER);
 	free_pages((unsigned long) lowcore, lc_order);
 	return -ENOMEM;
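
The consolidation above works because freeing page address zero is a no-op: free_page(0) and free_pages(0, order) simply return. A single out: label can therefore free every piece regardless of which allocation failed, and the 31-bit-only out_save_area label becomes unnecessary. A standalone, hypothetical sketch of the idiom (not code from this file):

	/* Hypothetical example of the single-exit cleanup idiom. */
	static int alloc_two_pages(unsigned long *first, unsigned long *second)
	{
		unsigned long a, b;

		a = get_zeroed_page(GFP_KERNEL);
		b = get_zeroed_page(GFP_KERNEL);
		if (!a || !b)
			goto out;
		*first = a;
		*second = b;
		return 0;
	out:
		free_page(a);	/* no-op if this allocation failed (a == 0) */
		free_page(b);
		return -ENOMEM;
	}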
@@ -690,12 +551,8 @@ int __cpuinit __cpu_up(unsigned int cpu)
 
 	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
 				   cpu, sigp_set_prefix);
-	if (ccode) {
-		printk("sigp_set_prefix failed for cpu %d "
-		       "with condition code %d\n",
-		       (int) cpu, (int) ccode);
+	if (ccode)
 		return -EIO;
-	}
 
 	idle = current_set[cpu];
 	cpu_lowcore = lowcore_ptr[cpu];
@@ -778,7 +635,7 @@ void __cpu_die(unsigned int cpu)
 	while (!smp_cpu_not_running(cpu))
 		cpu_relax();
 	smp_free_lowcore(cpu);
-	printk(KERN_INFO "Processor %d spun down\n", cpu);
+	pr_info("Processor %d stopped\n", cpu);
 }
 
 void cpu_die(void)