author	Ingo Molnar <mingo@elte.hu>	2008-07-15 15:55:59 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-15 15:55:59 -0400
commit	1a781a777b2f6ac46523fe92396215762ced624d (patch)
tree	4f34bb4aade85c0eb364b53d664ec7f6ab959006 /arch/s390
parent	b9d2252c1e44fa83a4e65fdc9eb93db6297c55af (diff)
parent	42a2f217a5e324ed5f2373ab1b7a0a15187c4d6c (diff)
Merge branch 'generic-ipi' into generic-ipi-for-linus
Conflicts:

	arch/powerpc/Kconfig
	arch/s390/kernel/time.c
	arch/x86/kernel/apic_32.c
	arch/x86/kernel/cpu/perfctr-watchdog.c
	arch/x86/kernel/i8259_64.c
	arch/x86/kernel/ldt.c
	arch/x86/kernel/nmi_64.c
	arch/x86/kernel/smpboot.c
	arch/x86/xen/smp.c
	include/asm-x86/hw_irq_32.h
	include/asm-x86/hw_irq_64.h
	include/asm-x86/mach-default/irq_vectors.h
	include/asm-x86/mach-voyager/irq_vectors.h
	include/asm-x86/smp.h
	kernel/Makefile

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/s390')
-rw-r--r--	arch/s390/appldata/appldata_base.c	4
-rw-r--r--	arch/s390/kernel/smp.c	22
-rw-r--r--	arch/s390/kernel/time.c	6
3 files changed, 14 insertions(+), 18 deletions(-)
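Note: the generic-ipi branch merged here removes the unused third argument from the cross-CPU call helpers; every arch/s390 hunk below is the corresponding prototype or call-site update. As a rough sketch of the interface change, with prototypes paraphrased from the s390 definitions and call sites in this diff rather than quoted from a header (the dropped argument is named 'nonatomic' in the s390 code; other places called it 'retry'):

/* before the merge: the extra argument was accepted but never used */
int smp_call_function(void (*func) (void *info), void *info, int nonatomic, int wait);
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int nonatomic, int wait);
int on_each_cpu(void (*func) (void *info), void *info, int nonatomic, int wait);

/* after the merge: callers pass only the wait flag */
int smp_call_function(void (*func) (void *info), void *info, int wait);
int smp_call_function_single(int cpu, void (*func) (void *info), void *info, int wait);
int on_each_cpu(void (*func) (void *info), void *info, int wait);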
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 9cb3d92447a3..a7f8979fb925 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -203,7 +203,7 @@ __appldata_vtimer_setup(int cmd)
 			per_cpu(appldata_timer, i).expires = per_cpu_interval;
 			smp_call_function_single(i, add_virt_timer_periodic,
 						 &per_cpu(appldata_timer, i),
-						 0, 1);
+						 1);
 		}
 		appldata_timer_active = 1;
 		break;
@@ -228,7 +228,7 @@ __appldata_vtimer_setup(int cmd)
 			args.timer = &per_cpu(appldata_timer, i);
 			args.expires = per_cpu_interval;
 			smp_call_function_single(i, __appldata_mod_vtimer_wrap,
-						 &args, 0, 1);
+						 &args, 1);
 		}
 	}
 }
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 5d4fa4b1c74c..b6781030cfbd 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -109,7 +109,7 @@ static void do_call_function(void)
 }
 
 static void __smp_call_function_map(void (*func) (void *info), void *info,
-				    int nonatomic, int wait, cpumask_t map)
+				    int wait, cpumask_t map)
 {
 	struct call_data_struct data;
 	int cpu, local = 0;
@@ -162,7 +162,6 @@ out:
  * smp_call_function:
  * @func: the function to run; this must be fast and non-blocking
  * @info: an arbitrary pointer to pass to the function
- * @nonatomic: unused
  * @wait: if true, wait (atomically) until function has completed on other CPUs
  *
  * Run a function on all other CPUs.
@@ -170,15 +169,14 @@ out:
  * You must not call this function with disabled interrupts, from a
  * hardware interrupt handler or from a bottom half.
  */
-int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
-		      int wait)
+int smp_call_function(void (*func) (void *info), void *info, int wait)
 {
 	cpumask_t map;
 
 	spin_lock(&call_lock);
 	map = cpu_online_map;
 	cpu_clear(smp_processor_id(), map);
-	__smp_call_function_map(func, info, nonatomic, wait, map);
+	__smp_call_function_map(func, info, wait, map);
 	spin_unlock(&call_lock);
 	return 0;
 }
@@ -189,7 +187,6 @@ EXPORT_SYMBOL(smp_call_function);
  * @cpu: the CPU where func should run
  * @func: the function to run; this must be fast and non-blocking
  * @info: an arbitrary pointer to pass to the function
- * @nonatomic: unused
  * @wait: if true, wait (atomically) until function has completed on other CPUs
  *
  * Run a function on one processor.
@@ -198,11 +195,10 @@ EXPORT_SYMBOL(smp_call_function);
  * hardware interrupt handler or from a bottom half.
  */
 int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-			     int nonatomic, int wait)
+			     int wait)
 {
 	spin_lock(&call_lock);
-	__smp_call_function_map(func, info, nonatomic, wait,
-				cpumask_of_cpu(cpu));
+	__smp_call_function_map(func, info, wait, cpumask_of_cpu(cpu));
 	spin_unlock(&call_lock);
 	return 0;
 }
@@ -228,7 +224,7 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
 {
 	spin_lock(&call_lock);
 	cpu_clear(smp_processor_id(), mask);
-	__smp_call_function_map(func, info, 0, wait, mask);
+	__smp_call_function_map(func, info, wait, mask);
 	spin_unlock(&call_lock);
 	return 0;
 }
@@ -303,7 +299,7 @@ static void smp_ptlb_callback(void *info)
 
 void smp_ptlb_all(void)
 {
-	on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
+	on_each_cpu(smp_ptlb_callback, NULL, 1);
 }
 EXPORT_SYMBOL(smp_ptlb_all);
 #endif /* ! CONFIG_64BIT */
@@ -351,7 +347,7 @@ void smp_ctl_set_bit(int cr, int bit)
 	memset(&parms.orvals, 0, sizeof(parms.orvals));
 	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
 	parms.orvals[cr] = 1 << bit;
-	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
+	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
 }
 EXPORT_SYMBOL(smp_ctl_set_bit);
 
@@ -365,7 +361,7 @@ void smp_ctl_clear_bit(int cr, int bit)
 	memset(&parms.orvals, 0, sizeof(parms.orvals));
 	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
 	parms.andvals[cr] = ~(1L << bit);
-	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
+	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
 }
 EXPORT_SYMBOL(smp_ctl_clear_bit);
 
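The wait flag keeps its old meaning; only the dead argument slot is removed. As an illustrative sketch using the same functions and variables that appear in the hunks above (not new code introduced by this patch), a synchronous caller simply keeps passing 1 as the final argument:

	/* run smp_ctl_bit_callback() on every online CPU and wait for completion */
	on_each_cpu(smp_ctl_bit_callback, &parms, 1);

	/* single-CPU variant from the appldata hunk, also waiting */
	smp_call_function_single(i, add_virt_timer_periodic,
				 &per_cpu(appldata_timer, i), 1);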
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 7418bebb547f..8051e9326dfc 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -707,7 +707,7 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
 	 */
 	memset(&etr_sync, 0, sizeof(etr_sync));
 	preempt_disable();
-	smp_call_function(clock_sync_cpu_start, &etr_sync, 0, 0);
+	smp_call_function(clock_sync_cpu_start, &etr_sync, 0);
 	local_irq_disable();
 	enable_sync_clock();
 
@@ -746,7 +746,7 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
 		rc = -EAGAIN;
 	}
 	local_irq_enable();
-	smp_call_function(clock_sync_cpu_end, NULL, 0, 0);
+	smp_call_function(clock_sync_cpu_end, NULL, 0);
 	preempt_enable();
 	return rc;
 }
@@ -926,7 +926,7 @@ static void etr_work_fn(struct work_struct *work)
 	if (!eacr.ea) {
 		/* Both ports offline. Reset everything. */
 		eacr.dp = eacr.es = eacr.sl = 0;
-		on_each_cpu(disable_sync_clock, NULL, 0, 1);
+		on_each_cpu(disable_sync_clock, NULL, 1);
 		del_timer_sync(&etr_timer);
 		etr_update_eacr(eacr);
 		clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
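By contrast, the etr_sync_clock() hunks above leave wait at 0: the caller only kicks clock_sync_cpu_start()/clock_sync_cpu_end() off on the other CPUs and continues immediately. A minimal sketch of both patterns after this change, reusing only calls shown in this diff:

	smp_call_function(clock_sync_cpu_start, &etr_sync, 0);	/* fire and continue */
	on_each_cpu(disable_sync_clock, NULL, 1);		/* wait until every CPU has run it */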