author:		Heiko Carstens <heiko.carstens@de.ibm.com>	2008-12-25 07:38:39 -0500
committer:	Martin Schwidefsky <schwidefsky@de.ibm.com>	2008-12-25 07:38:56 -0500
commit:		ca9fc75a68ee98812bb6d212405fea039421910b
tree:		37a48b04355f9fc535c928ef4617e20ae513eaa4	/arch/s390/kernel/smp.c
parent:		0b3016b781abeabc502042c942cbc611e31250c7
[S390] convert s390 to generic IPI infrastructure
Since etr/stp don't need the old smp_call_function semantics anymore
we can convert s390 to the generic IPI infrastructure.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
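For context, the division of labour after this conversion: the generic code in
kernel/smp.c owns the call queueing and completion waiting that the removed
__smp_call_function_map() did by hand, while the architecture supplies only the
IPI delivery hooks and forwards the receive interrupt to the generic handlers.
A minimal sketch of that flow, assuming the 2.6.28-era generic IPI code
(smp_call_function_sketch() is a hypothetical name used for illustration; the
real entry point is the generic smp_call_function(), which is not part of this
diff):

/* Illustrative sketch, not from this commit: roughly what the generic
 * kernel/smp.c entry point does with the two arch hooks added below. */
int smp_call_function_sketch(void (*func)(void *), void *info, int wait)
{
	cpumask_t mask = cpu_online_map;

	cpu_clear(smp_processor_id(), mask);

	/* Generic code queues (func, info) on each target CPU's call
	 * list; only the actual IPI delivery is architecture specific. */
	arch_send_call_function_ipi(mask);

	/* Each target takes an external call interrupt;
	 * do_ext_call_interrupt() forwards it to
	 * generic_smp_call_function_interrupt(), which runs func.
	 * With wait != 0 the generic code spins until every target
	 * CPU has finished running func. */
	return 0;
}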
Diffstat (limited to 'arch/s390/kernel/smp.c')
-rw-r--r--	arch/s390/kernel/smp.c	175
1 file changed, 19 insertions(+), 156 deletions(-)
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index b5595688a477..176a43e5b8b4 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -77,159 +77,6 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices);
 
 static void smp_ext_bitcall(int, ec_bit_sig);
 
-/*
- * Structure and data for __smp_call_function_map(). This is designed to
- * minimise static memory requirements. It also looks cleaner.
- */
-static DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
-	void (*func) (void *info);
-	void *info;
-	cpumask_t started;
-	cpumask_t finished;
-	int wait;
-};
-
-static struct call_data_struct *call_data;
-
-/*
- * 'Call function' interrupt callback
- */
-static void do_call_function(void)
-{
-	void (*func) (void *info) = call_data->func;
-	void *info = call_data->info;
-	int wait = call_data->wait;
-
-	cpu_set(smp_processor_id(), call_data->started);
-	(*func)(info);
-	if (wait)
-		cpu_set(smp_processor_id(), call_data->finished);;
-}
-
-static void __smp_call_function_map(void (*func) (void *info), void *info,
-				    int wait, cpumask_t map)
-{
-	struct call_data_struct data;
-	int cpu, local = 0;
-
-	/*
-	 * Can deadlock when interrupts are disabled or if in wrong context.
-	 */
-	WARN_ON(irqs_disabled() || in_irq());
-
-	/*
-	 * Check for local function call. We have to have the same call order
-	 * as in on_each_cpu() because of machine_restart_smp().
-	 */
-	if (cpu_isset(smp_processor_id(), map)) {
-		local = 1;
-		cpu_clear(smp_processor_id(), map);
-	}
-
-	cpus_and(map, map, cpu_online_map);
-	if (cpus_empty(map))
-		goto out;
-
-	data.func = func;
-	data.info = info;
-	data.started = CPU_MASK_NONE;
-	data.wait = wait;
-	if (wait)
-		data.finished = CPU_MASK_NONE;
-
-	call_data = &data;
-
-	for_each_cpu_mask(cpu, map)
-		smp_ext_bitcall(cpu, ec_call_function);
-
-	/* Wait for response */
-	while (!cpus_equal(map, data.started))
-		cpu_relax();
-	if (wait)
-		while (!cpus_equal(map, data.finished))
-			cpu_relax();
-out:
-	if (local) {
-		local_irq_disable();
-		func(info);
-		local_irq_enable();
-	}
-}
-
-/*
- * smp_call_function:
- * @func: the function to run; this must be fast and non-blocking
- * @info: an arbitrary pointer to pass to the function
- * @wait: if true, wait (atomically) until function has completed on other CPUs
- *
- * Run a function on all other CPUs.
- *
- * You must not call this function with disabled interrupts, from a
- * hardware interrupt handler or from a bottom half.
- */
-int smp_call_function(void (*func) (void *info), void *info, int wait)
-{
-	cpumask_t map;
-
-	spin_lock(&call_lock);
-	map = cpu_online_map;
-	cpu_clear(smp_processor_id(), map);
-	__smp_call_function_map(func, info, wait, map);
-	spin_unlock(&call_lock);
-	return 0;
-}
-EXPORT_SYMBOL(smp_call_function);
-
-/*
- * smp_call_function_single:
- * @cpu: the CPU where func should run
- * @func: the function to run; this must be fast and non-blocking
- * @info: an arbitrary pointer to pass to the function
- * @wait: if true, wait (atomically) until function has completed on other CPUs
- *
- * Run a function on one processor.
- *
- * You must not call this function with disabled interrupts, from a
- * hardware interrupt handler or from a bottom half.
- */
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-			     int wait)
-{
-	spin_lock(&call_lock);
-	__smp_call_function_map(func, info, wait, cpumask_of_cpu(cpu));
-	spin_unlock(&call_lock);
-	return 0;
-}
-EXPORT_SYMBOL(smp_call_function_single);
-
-/**
- * smp_call_function_mask(): Run a function on a set of other CPUs.
- * @mask: The set of cpus to run on. Must not include the current cpu.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
-			   int wait)
-{
-	spin_lock(&call_lock);
-	cpu_clear(smp_processor_id(), mask);
-	__smp_call_function_map(func, info, wait, mask);
-	spin_unlock(&call_lock);
-	return 0;
-}
-EXPORT_SYMBOL(smp_call_function_mask);
-
 void smp_send_stop(void)
 {
 	int cpu, rc;
@@ -271,7 +118,10 @@ static void do_ext_call_interrupt(__u16 code)
 	bits = xchg(&S390_lowcore.ext_call_fast, 0);
 
 	if (test_bit(ec_call_function, &bits))
-		do_call_function();
+		generic_smp_call_function_interrupt();
+
+	if (test_bit(ec_call_function_single, &bits))
+		generic_smp_call_function_single_interrupt();
 }
 
 /*
@@ -288,6 +138,19 @@ static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
 	udelay(10);
 }
 
+void arch_send_call_function_ipi(cpumask_t mask)
+{
+	int cpu;
+
+	for_each_cpu_mask(cpu, mask)
+		smp_ext_bitcall(cpu, ec_call_function);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+	smp_ext_bitcall(cpu, ec_call_function_single);
+}
+
 #ifndef CONFIG_64BIT
 /*
  * this function sends a 'purge tlb' signal to another CPU.
@@ -588,9 +451,9 @@ int __cpuinit start_secondary(void *cpuvoid)
 	/* call cpu notifiers */
 	notify_cpu_starting(smp_processor_id());
 	/* Mark this cpu as online */
-	spin_lock(&call_lock);
+	ipi_call_lock();
 	cpu_set(smp_processor_id(), cpu_online_map);
-	spin_unlock(&call_lock);
+	ipi_call_unlock();
 	/* Switch on interrupts */
 	local_irq_enable();
 	/* Print info about this processor */
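Callers are unaffected by the conversion: the generic kernel/smp.c provides
smp_call_function() and smp_call_function_single() with the same signatures the
removed arch versions exported. A hypothetical caller, for illustration
(example_flush() and example_caller() are made-up names, not from this commit):

static void example_flush(void *unused)
{
	/* Runs on each targeted CPU, in interrupt context;
	 * must be fast and non-blocking. */
}

static void example_caller(void)
{
	/* Run on all other online CPUs and wait for completion. */
	smp_call_function(example_flush, NULL, 1);
}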