Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/kernel/smp.c | 181
1 file changed, 77 insertions(+), 104 deletions(-)
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 83a4ea6e3d60..432deb2d9795 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -54,19 +54,18 @@ cpumask_t cpu_possible_map = CPU_MASK_NONE;
 static struct task_struct *current_set[NR_CPUS];
 
 static void smp_ext_bitcall(int, ec_bit_sig);
-static void smp_ext_bitcall_others(ec_bit_sig);
 
 /*
- * Structure and data for smp_call_function(). This is designed to minimise
- * static memory requirements. It also looks cleaner.
+ * Structure and data for __smp_call_function_map(). This is designed to
+ * minimise static memory requirements. It also looks cleaner.
  */
 static DEFINE_SPINLOCK(call_lock);
 
 struct call_data_struct {
	void (*func) (void *info);
	void *info;
-	atomic_t started;
-	atomic_t finished;
+	cpumask_t started;
+	cpumask_t finished;
	int wait;
 };
 
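[Note: the counters become cpumasks so that the initiator can check that exactly the CPUs it signalled have responded, rather than comparing a count that any CPU could bump. A recap of the rendezvous as it appears later in this patch, not new code:]

	/* responder, in do_call_function() */
	cpu_set(smp_processor_id(), call_data->started);

	/* initiator, in __smp_call_function_map() */
	while (!cpus_equal(map, data.started))
		cpu_relax();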
@@ -81,118 +80,113 @@ static void do_call_function(void)
	void *info = call_data->info;
	int wait = call_data->wait;
 
-	atomic_inc(&call_data->started);
+	cpu_set(smp_processor_id(), call_data->started);
	(*func)(info);
	if (wait)
-		atomic_inc(&call_data->finished);
+		cpu_set(smp_processor_id(), call_data->finished);
 }
 
-/*
- * this function sends a 'generic call function' IPI to all other CPUs
- * in the system.
- */
-
-int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
-			int wait)
-/*
- * [SUMMARY] Run a function on all other CPUs.
- * <func> The function to run. This must be fast and non-blocking.
- * <info> An arbitrary pointer to pass to the function.
- * <nonatomic> currently unused.
- * <wait> If true, wait (atomically) until function has completed on other CPUs.
- * [RETURNS] 0 on success, else a negative status code. Does not return until
- * remote CPUs are nearly ready to execute <<func>> or are or have executed.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler.
- */
+static void __smp_call_function_map(void (*func) (void *info), void *info,
+				    int nonatomic, int wait, cpumask_t map)
 {
	struct call_data_struct data;
-	int cpus = num_online_cpus()-1;
+	int cpu, local = 0;
 
-	if (cpus <= 0)
-		return 0;
+	/*
+	 * Can deadlock when interrupts are disabled or if in wrong context,
+	 * caller must disable preemption
+	 */
+	WARN_ON(irqs_disabled() || in_irq() || preemptible());
 
-	/* Can deadlock when interrupts are disabled or if in wrong context */
-	WARN_ON(irqs_disabled() || in_irq());
+	/*
+	 * Check for local function call. We have to have the same call order
+	 * as in on_each_cpu() because of machine_restart_smp().
+	 */
+	if (cpu_isset(smp_processor_id(), map)) {
+		local = 1;
+		cpu_clear(smp_processor_id(), map);
+	}
+
+	cpus_and(map, map, cpu_online_map);
+	if (cpus_empty(map))
+		goto out;
 
	data.func = func;
	data.info = info;
-	atomic_set(&data.started, 0);
+	data.started = CPU_MASK_NONE;
	data.wait = wait;
	if (wait)
-		atomic_set(&data.finished, 0);
+		data.finished = CPU_MASK_NONE;
 
	spin_lock_bh(&call_lock);
	call_data = &data;
-	/* Send a message to all other CPUs and wait for them to respond */
-	smp_ext_bitcall_others(ec_call_function);
+
+	for_each_cpu_mask(cpu, map)
+		smp_ext_bitcall(cpu, ec_call_function);
 
	/* Wait for response */
-	while (atomic_read(&data.started) != cpus)
+	while (!cpus_equal(map, data.started))
		cpu_relax();
 
	if (wait)
-		while (atomic_read(&data.finished) != cpus)
+		while (!cpus_equal(map, data.finished))
			cpu_relax();
+
	spin_unlock_bh(&call_lock);
 
-	return 0;
+out:
+	local_irq_disable();
+	if (local)
+		func(info);
+	local_irq_enable();
 }
 
 /*
- * Call a function on one CPU
- * cpu : the CPU the function should be executed on
+ * smp_call_function:
+ * @func: the function to run; this must be fast and non-blocking
+ * @info: an arbitrary pointer to pass to the function
+ * @nonatomic: unused
+ * @wait: if true, wait (atomically) until function has completed on other CPUs
  *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler. You may call it from a bottom half.
+ * Run a function on all other CPUs.
  *
- * It is guaranteed that the called function runs on the specified CPU,
- * preemption is disabled.
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler. Must be called with preemption disabled.
+ * You may call it from a bottom half.
  */
-int smp_call_function_on(void (*func) (void *info), void *info,
-			 int nonatomic, int wait, int cpu)
+int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
+		      int wait)
 {
-	struct call_data_struct data;
-	int curr_cpu;
-
-	if (!cpu_online(cpu))
-		return -EINVAL;
-
-	/* Can deadlock when interrupts are disabled or if in wrong context */
-	WARN_ON(irqs_disabled() || in_irq());
-
-	/* disable preemption for local function call */
-	curr_cpu = get_cpu();
-
-	if (curr_cpu == cpu) {
-		/* direct call to function */
-		func(info);
-		put_cpu();
-		return 0;
-	}
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	spin_lock_bh(&call_lock);
-	call_data = &data;
-	smp_ext_bitcall(cpu, ec_call_function);
+	cpumask_t map;
 
-	/* Wait for response */
-	while (atomic_read(&data.started) != 1)
-		cpu_relax();
+	map = cpu_online_map;
+	cpu_clear(smp_processor_id(), map);
+	__smp_call_function_map(func, info, nonatomic, wait, map);
+	return 0;
+}
+EXPORT_SYMBOL(smp_call_function);
 
-	if (wait)
-		while (atomic_read(&data.finished) != 1)
-			cpu_relax();
+/*
+ * smp_call_function_on:
+ * @func: the function to run; this must be fast and non-blocking
+ * @info: an arbitrary pointer to pass to the function
+ * @nonatomic: unused
+ * @wait: if true, wait (atomically) until function has completed on other CPUs
+ * @cpu: the CPU where func should run
+ *
+ * Run a function on one processor.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler. Must be called with preemption disabled.
+ * You may call it from a bottom half.
+ */
+int smp_call_function_on(void (*func) (void *info), void *info, int nonatomic,
+			 int wait, int cpu)
+{
+	cpumask_t map = CPU_MASK_NONE;
 
-	spin_unlock_bh(&call_lock);
-	put_cpu();
+	cpu_set(cpu, map);
+	__smp_call_function_map(func, info, nonatomic, wait, map);
	return 0;
 }
 EXPORT_SYMBOL(smp_call_function_on);
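[Note: a minimal caller sketch, hypothetical and not part of the patch, showing the new contract: the caller, not the callee, holds preemption off, e.g. via get_cpu()/put_cpu(). Also note a subtle behaviour change: the old -EINVAL for an offline target is gone; an offline cpu simply drops out of the map and the call returns 0.]

#include <linux/smp.h>
#include <asm/atomic.h>

static atomic_t hits = ATOMIC_INIT(0);	/* hypothetical counter */

static void bump(void *unused)
{
	atomic_inc(&hits);		/* runs on the target cpu */
}

static void poke_cpu_one(void)
{
	get_cpu();			/* preemption off, per the comment above */
	smp_call_function_on(bump, NULL, 0, 1, 1);
	put_cpu();
}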
@@ -325,26 +319,6 @@ static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
		udelay(10);
 }
 
-/*
- * Send an external call sigp to every other cpu in the system and
- * return without waiting for its completion.
- */
-static void smp_ext_bitcall_others(ec_bit_sig sig)
-{
-	int cpu;
-
-	for_each_online_cpu(cpu) {
-		if (cpu == smp_processor_id())
-			continue;
-		/*
-		 * Set signaling bit in lowcore of target cpu and kick it
-		 */
-		set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
-		while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
-			udelay(10);
-	}
-}
-
 #ifndef CONFIG_64BIT
 /*
  * this function sends a 'purge tlb' signal to another CPU.
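[Note: the broadcast helper goes away without loss of function. Under this patch the same effect is a per-cpu loop over a mask with the sender cleared; a sketch, reusing only calls visible in this file:]

	cpumask_t others = cpu_online_map;
	int cpu;

	cpu_clear(smp_processor_id(), others);
	for_each_cpu_mask(cpu, others)
		smp_ext_bitcall(cpu, ec_call_function);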
@@ -807,6 +781,5 @@ EXPORT_SYMBOL(cpu_possible_map);
 EXPORT_SYMBOL(lowcore_ptr);
 EXPORT_SYMBOL(smp_ctl_set_bit);
 EXPORT_SYMBOL(smp_ctl_clear_bit);
-EXPORT_SYMBOL(smp_call_function);
 EXPORT_SYMBOL(smp_get_cpu);
 EXPORT_SYMBOL(smp_put_cpu);
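[Note: the call-order comment in __smp_call_function_map() ("same call order as in on_each_cpu() because of machine_restart_smp()") is easiest to see in a wrapper: remote cpus are signalled first, the local call runs last with interrupts disabled. A file-local sketch, illustrative only; run_everywhere is not part of the patch:]

static void run_everywhere(void (*func) (void *), void *info)
{
	preempt_disable();
	/*
	 * cpu_online_map includes the calling cpu, so the helper signals
	 * all other cpus first, then runs func() locally at the "out:"
	 * label with interrupts disabled.
	 */
	__smp_call_function_map(func, info, 0, 1, cpu_online_map);
	preempt_enable();
}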