author		Rusty Russell <rusty@rustcorp.com.au>	2008-12-29 16:32:35 -0500
committer	Rusty Russell <rusty@rustcorp.com.au>	2008-12-29 16:32:35 -0500
commit		33edcf133ba93ecba2e4b6472e97b689895d805c (patch)
tree		327d7a20acef64005e7c5ccbfa1265be28aeb6ac /arch/s390/kernel/smp.c
parent		be4d638c1597580ed2294d899d9f1a2cd10e462c (diff)
parent		3c92ec8ae91ecf59d88c798301833d7cf83f2179 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'arch/s390/kernel/smp.c')
-rw-r--r--	arch/s390/kernel/smp.c | 201 +++------------------
1 file changed, 29 insertions(+), 172 deletions(-)
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index f03914b8ed2f..3ed5c7a83c6c 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -20,6 +20,9 @@
  * cpu_number_map in other architectures.
  */
 
+#define KMSG_COMPONENT "cpu"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/mm.h>
@@ -71,159 +74,6 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices);
 
 static void smp_ext_bitcall(int, ec_bit_sig);
 
-/*
- * Structure and data for __smp_call_function_map(). This is designed to
- * minimise static memory requirements. It also looks cleaner.
- */
-static DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
-	void (*func) (void *info);
-	void *info;
-	cpumask_t started;
-	cpumask_t finished;
-	int wait;
-};
-
-static struct call_data_struct *call_data;
-
-/*
- * 'Call function' interrupt callback
- */
-static void do_call_function(void)
-{
-	void (*func) (void *info) = call_data->func;
-	void *info = call_data->info;
-	int wait = call_data->wait;
-
-	cpu_set(smp_processor_id(), call_data->started);
-	(*func)(info);
-	if (wait)
-		cpu_set(smp_processor_id(), call_data->finished);;
-}
-
-static void __smp_call_function_map(void (*func) (void *info), void *info,
-				    int wait, cpumask_t map)
-{
-	struct call_data_struct data;
-	int cpu, local = 0;
-
-	/*
-	 * Can deadlock when interrupts are disabled or if in wrong context.
-	 */
-	WARN_ON(irqs_disabled() || in_irq());
-
-	/*
-	 * Check for local function call. We have to have the same call order
-	 * as in on_each_cpu() because of machine_restart_smp().
-	 */
-	if (cpu_isset(smp_processor_id(), map)) {
-		local = 1;
-		cpu_clear(smp_processor_id(), map);
-	}
-
-	cpus_and(map, map, cpu_online_map);
-	if (cpus_empty(map))
-		goto out;
-
-	data.func = func;
-	data.info = info;
-	data.started = CPU_MASK_NONE;
-	data.wait = wait;
-	if (wait)
-		data.finished = CPU_MASK_NONE;
-
-	call_data = &data;
-
-	for_each_cpu_mask(cpu, map)
-		smp_ext_bitcall(cpu, ec_call_function);
-
-	/* Wait for response */
-	while (!cpus_equal(map, data.started))
-		cpu_relax();
-	if (wait)
-		while (!cpus_equal(map, data.finished))
-			cpu_relax();
-out:
-	if (local) {
-		local_irq_disable();
-		func(info);
-		local_irq_enable();
-	}
-}
-
-/*
- * smp_call_function:
- * @func: the function to run; this must be fast and non-blocking
- * @info: an arbitrary pointer to pass to the function
- * @wait: if true, wait (atomically) until function has completed on other CPUs
- *
- * Run a function on all other CPUs.
- *
- * You must not call this function with disabled interrupts, from a
- * hardware interrupt handler or from a bottom half.
- */
-int smp_call_function(void (*func) (void *info), void *info, int wait)
-{
-	cpumask_t map;
-
-	spin_lock(&call_lock);
-	map = cpu_online_map;
-	cpu_clear(smp_processor_id(), map);
-	__smp_call_function_map(func, info, wait, map);
-	spin_unlock(&call_lock);
-	return 0;
-}
-EXPORT_SYMBOL(smp_call_function);
-
-/*
- * smp_call_function_single:
- * @cpu: the CPU where func should run
- * @func: the function to run; this must be fast and non-blocking
- * @info: an arbitrary pointer to pass to the function
- * @wait: if true, wait (atomically) until function has completed on other CPUs
- *
- * Run a function on one processor.
- *
- * You must not call this function with disabled interrupts, from a
- * hardware interrupt handler or from a bottom half.
- */
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-			     int wait)
-{
-	spin_lock(&call_lock);
-	__smp_call_function_map(func, info, wait, cpumask_of_cpu(cpu));
-	spin_unlock(&call_lock);
-	return 0;
-}
-EXPORT_SYMBOL(smp_call_function_single);
-
-/**
- * smp_call_function_mask(): Run a function on a set of other CPUs.
- * @mask: The set of cpus to run on. Must not include the current cpu.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
-			   int wait)
-{
-	spin_lock(&call_lock);
-	cpu_clear(smp_processor_id(), mask);
-	__smp_call_function_map(func, info, wait, mask);
-	spin_unlock(&call_lock);
-	return 0;
-}
-EXPORT_SYMBOL(smp_call_function_mask);
-
 void smp_send_stop(void)
 {
 	int cpu, rc;
@@ -265,7 +115,10 @@ static void do_ext_call_interrupt(__u16 code)
 	bits = xchg(&S390_lowcore.ext_call_fast, 0);
 
 	if (test_bit(ec_call_function, &bits))
-		do_call_function();
+		generic_smp_call_function_interrupt();
+
+	if (test_bit(ec_call_function_single, &bits))
+		generic_smp_call_function_single_interrupt();
 }
 
 /*
@@ -282,6 +135,19 @@ static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
 		udelay(10);
 }
 
+void arch_send_call_function_ipi(cpumask_t mask)
+{
+	int cpu;
+
+	for_each_cpu_mask(cpu, mask)
+		smp_ext_bitcall(cpu, ec_call_function);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+	smp_ext_bitcall(cpu, ec_call_function_single);
+}
+
 #ifndef CONFIG_64BIT
 /*
  * this function sends a 'purge tlb' signal to another CPU.
@@ -382,8 +248,8 @@ static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
 	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
 		return;
 	if (cpu >= NR_CPUS) {
-		printk(KERN_WARNING "Registers for cpu %i not saved since dump "
-		       "kernel was compiled with NR_CPUS=%i\n", cpu, NR_CPUS);
+		pr_warning("CPU %i exceeds the maximum %i and is excluded from "
+			   "the dump\n", cpu, NR_CPUS - 1);
 		return;
 	}
 	zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL);
@@ -556,7 +422,7 @@ static void __init smp_detect_cpus(void)
 	}
 out:
 	kfree(info);
-	printk(KERN_INFO "CPUs: %d configured, %d standby\n", c_cpus, s_cpus);
+	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
 	get_online_cpus();
 	__smp_rescan_cpus();
 	put_online_cpus();
@@ -572,19 +438,17 @@ int __cpuinit start_secondary(void *cpuvoid)
 	preempt_disable();
 	/* Enable TOD clock interrupts on the secondary cpu. */
 	init_cpu_timer();
-#ifdef CONFIG_VIRT_TIMER
 	/* Enable cpu timer interrupts on the secondary cpu. */
 	init_cpu_vtimer();
-#endif
 	/* Enable pfault pseudo page faults on this cpu. */
 	pfault_init();
 
 	/* call cpu notifiers */
 	notify_cpu_starting(smp_processor_id());
 	/* Mark this cpu as online */
-	spin_lock(&call_lock);
+	ipi_call_lock();
 	cpu_set(smp_processor_id(), cpu_online_map);
-	spin_unlock(&call_lock);
+	ipi_call_unlock();
 	/* Switch on interrupts */
 	local_irq_enable();
 	/* Print info about this processor */
@@ -633,18 +497,15 @@ static int __cpuinit smp_alloc_lowcore(int cpu)
 
 	save_area = get_zeroed_page(GFP_KERNEL);
 	if (!save_area)
-		goto out_save_area;
+		goto out;
 	lowcore->extended_save_area_addr = (u32) save_area;
 	}
 #endif
 	lowcore_ptr[cpu] = lowcore;
 	return 0;
 
-#ifndef CONFIG_64BIT
-out_save_area:
-	free_page(panic_stack);
-#endif
 out:
+	free_page(panic_stack);
 	free_pages(async_stack, ASYNC_ORDER);
 	free_pages((unsigned long) lowcore, lc_order);
 	return -ENOMEM;
@@ -684,12 +545,8 @@ int __cpuinit __cpu_up(unsigned int cpu)
 
 	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
 				   cpu, sigp_set_prefix);
-	if (ccode) {
-		printk("sigp_set_prefix failed for cpu %d "
-		       "with condition code %d\n",
-		       (int) cpu, (int) ccode);
+	if (ccode)
 		return -EIO;
-	}
 
 	idle = current_set[cpu];
 	cpu_lowcore = lowcore_ptr[cpu];
@@ -772,7 +629,7 @@ void __cpu_die(unsigned int cpu)
 	while (!smp_cpu_not_running(cpu))
 		cpu_relax();
 	smp_free_lowcore(cpu);
-	printk(KERN_INFO "Processor %d spun down\n", cpu);
+	pr_info("Processor %d stopped\n", cpu);
 }
 
 void cpu_die(void)
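
Context for the diff above: the merge removes the s390-private cross-call machinery (call_lock, call_data_struct, __smp_call_function_map() and the three exported wrappers) and switches the architecture to the generic implementation in kernel/smp.c. What remains architecture-specific is IPI delivery (arch_send_call_function_ipi() and arch_send_call_function_single_ipi(), both built on smp_ext_bitcall()) and dispatching generic_smp_call_function_interrupt() / generic_smp_call_function_single_interrupt() from do_ext_call_interrupt(). The sketch below is illustrative only and not part of the commit: a minimal, hypothetical module showing how callers use the generic API of this kernel generation; the module and function names are invented for the example.

	/*
	 * crosscall_demo.c - hypothetical module exercising the generic
	 * smp_call_function() API that this merge switches s390 over to.
	 * Assumes a 2.6.28-era (or later) tree with generic SMP helpers.
	 */
	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <linux/module.h>
	#include <linux/smp.h>

	/* Runs in interrupt context on each target CPU: it must be fast
	 * and non-blocking, exactly as the removed kerneldoc demanded. */
	static void report_cpu(void *info)
	{
		pr_info("crosscall_demo: hello from CPU %d\n", smp_processor_id());
	}

	static int __init crosscall_demo_init(void)
	{
		/*
		 * Run report_cpu() on every other online CPU; wait=1 blocks
		 * until all of them have returned. On s390 this now travels
		 * through arch_send_call_function_ipi() -> smp_ext_bitcall(),
		 * and the ec_call_function bit is picked up by
		 * do_ext_call_interrupt(). Interrupts must be enabled here.
		 */
		smp_call_function(report_cpu, NULL, 1);

		/* Target one CPU without waiting; delivery uses the new
		 * ec_call_function_single external-call bit. */
		smp_call_function_single(0, report_cpu, NULL, 0);
		return 0;
	}

	static void __exit crosscall_demo_exit(void)
	{
	}

	module_init(crosscall_demo_init);
	module_exit(crosscall_demo_exit);
	MODULE_LICENSE("GPL");

One consequence of the switch is visible in start_secondary(): marking a CPU online is now serialized against in-flight cross-calls with ipi_call_lock()/ipi_call_unlock() instead of the deleted call_lock spinlock.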