-rw-r--r--	arch/x86/kernel/cpu/mtrr/main.c	| 192
-rw-r--r--	include/linux/stop_machine.h	|   2
-rw-r--r--	kernel/stop_machine.c	|   2
3 files changed, 42 insertions, 154 deletions
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 3d17bc7f06e6..707b6377adf1 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -137,55 +137,43 @@ static void __init init_table(void)
 }
 
 struct set_mtrr_data {
-	atomic_t	count;
-	atomic_t	gate;
 	unsigned long	smp_base;
 	unsigned long	smp_size;
 	unsigned int	smp_reg;
 	mtrr_type	smp_type;
 };
 
-static DEFINE_PER_CPU(struct cpu_stop_work, mtrr_work);
-
 /**
- * mtrr_work_handler - Synchronisation handler. Executed by "other" CPUs.
+ * mtrr_rendezvous_handler - Work done in the synchronization handler. Executed
+ * by all the CPUs.
  * @info: pointer to mtrr configuration data
  *
  * Returns nothing.
  */
-static int mtrr_work_handler(void *info)
+static int mtrr_rendezvous_handler(void *info)
 {
 #ifdef CONFIG_SMP
 	struct set_mtrr_data *data = info;
-	unsigned long flags;
-
-	atomic_dec(&data->count);
-	while (!atomic_read(&data->gate))
-		cpu_relax();
-
-	local_irq_save(flags);
-
-	atomic_dec(&data->count);
-	while (atomic_read(&data->gate))
-		cpu_relax();
 
-	/* The master has cleared me to execute */
+	/*
+	 * We use this same function to initialize the mtrrs during boot,
+	 * resume, runtime cpu online and on an explicit request to set a
+	 * specific MTRR.
+	 *
+	 * During boot or suspend, the state of the boot cpu's mtrrs has been
+	 * saved, and we want to replicate that across all the cpus that come
+	 * online (either at the end of boot or resume or during a runtime cpu
+	 * online). If we're doing that, @reg is set to something special and on
+	 * all the cpu's we do mtrr_if->set_all() (On the logical cpu that
+	 * started the boot/resume sequence, this might be a duplicate
+	 * set_all()).
+	 */
 	if (data->smp_reg != ~0U) {
 		mtrr_if->set(data->smp_reg, data->smp_base,
 			     data->smp_size, data->smp_type);
-	} else if (mtrr_aps_delayed_init) {
-		/*
-		 * Initialize the MTRRs inaddition to the synchronisation.
-		 */
+	} else if (mtrr_aps_delayed_init || !cpu_online(smp_processor_id())) {
 		mtrr_if->set_all();
 	}
-
-	atomic_dec(&data->count);
-	while (!atomic_read(&data->gate))
-		cpu_relax();
-
-	atomic_dec(&data->count);
-	local_irq_restore(flags);
 #endif
 	return 0;
 }
@@ -223,20 +211,11 @@ static inline int types_compatible(mtrr_type type1, mtrr_type type2)
  * 14. Wait for buddies to catch up
  * 15. Enable interrupts.
  *
- * What does that mean for us? Well, first we set data.count to the number
- * of CPUs. As each CPU announces that it started the rendezvous handler by
- * decrementing the count, We reset data.count and set the data.gate flag
- * allowing all the cpu's to proceed with the work. As each cpu disables
- * interrupts, it'll decrement data.count once. We wait until it hits 0 and
- * proceed. We clear the data.gate flag and reset data.count. Meanwhile, they
- * are waiting for that flag to be cleared. Once it's cleared, each
- * CPU goes through the transition of updating MTRRs.
- * The CPU vendors may each do it differently,
- * so we call mtrr_if->set() callback and let them take care of it.
- * When they're done, they again decrement data->count and wait for data.gate
- * to be set.
- * When we finish, we wait for data.count to hit 0 and toggle the data.gate flag
- * Everyone then enables interrupts and we all continue on.
+ * What does that mean for us? Well, stop_machine() will ensure that
+ * the rendezvous handler is started on each CPU. And in lockstep they
+ * do the state transition of disabling interrupts, updating MTRR's
+ * (the CPU vendors may each do it differently, so we call mtrr_if->set()
+ * callback and let them take care of it.) and enabling interrupts.
  *
  * Note that the mechanism is the same for UP systems, too; all the SMP stuff
  * becomes nops.
@@ -244,115 +223,26 @@ static inline int types_compatible(mtrr_type type1, mtrr_type type2)
 static void
 set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type)
 {
-	struct set_mtrr_data data;
-	unsigned long flags;
-	int cpu;
-
-#ifdef CONFIG_SMP
-	/*
-	 * If this cpu is not yet active, we are in the cpu online path. There
-	 * can be no stop_machine() in parallel, as stop machine ensures this
-	 * by using get_online_cpus(). We can skip taking the stop_cpus_mutex,
-	 * as we don't need it and also we can't afford to block while waiting
-	 * for the mutex.
-	 *
-	 * If this cpu is active, we need to prevent stop_machine() happening
-	 * in parallel by taking the stop cpus mutex.
-	 *
-	 * Also, this is called in the context of cpu online path or in the
-	 * context where cpu hotplug is prevented. So checking the active status
-	 * of the raw_smp_processor_id() is safe.
-	 */
-	if (cpu_active(raw_smp_processor_id()))
-		mutex_lock(&stop_cpus_mutex);
-#endif
-
-	preempt_disable();
-
-	data.smp_reg = reg;
-	data.smp_base = base;
-	data.smp_size = size;
-	data.smp_type = type;
-	atomic_set(&data.count, num_booting_cpus() - 1);
-
-	/* Make sure data.count is visible before unleashing other CPUs */
-	smp_wmb();
-	atomic_set(&data.gate, 0);
-
-	/* Start the ball rolling on other CPUs */
-	for_each_online_cpu(cpu) {
-		struct cpu_stop_work *work = &per_cpu(mtrr_work, cpu);
-
-		if (cpu == smp_processor_id())
-			continue;
-
-		stop_one_cpu_nowait(cpu, mtrr_work_handler, &data, work);
-	}
-
-
-	while (atomic_read(&data.count))
-		cpu_relax();
-
-	/* Ok, reset count and toggle gate */
-	atomic_set(&data.count, num_booting_cpus() - 1);
-	smp_wmb();
-	atomic_set(&data.gate, 1);
-
-	local_irq_save(flags);
-
-	while (atomic_read(&data.count))
-		cpu_relax();
-
-	/* Ok, reset count and toggle gate */
-	atomic_set(&data.count, num_booting_cpus() - 1);
-	smp_wmb();
-	atomic_set(&data.gate, 0);
-
-	/* Do our MTRR business */
-
-	/*
-	 * HACK!
-	 *
-	 * We use this same function to initialize the mtrrs during boot,
-	 * resume, runtime cpu online and on an explicit request to set a
-	 * specific MTRR.
-	 *
-	 * During boot or suspend, the state of the boot cpu's mtrrs has been
-	 * saved, and we want to replicate that across all the cpus that come
-	 * online (either at the end of boot or resume or during a runtime cpu
-	 * online). If we're doing that, @reg is set to something special and on
-	 * this cpu we still do mtrr_if->set_all(). During boot/resume, this
-	 * is unnecessary if at this point we are still on the cpu that started
-	 * the boot/resume sequence. But there is no guarantee that we are still
-	 * on the same cpu. So we do mtrr_if->set_all() on this cpu aswell to be
-	 * sure that we are in sync with everyone else.
-	 */
-	if (reg != ~0U)
-		mtrr_if->set(reg, base, size, type);
-	else
-		mtrr_if->set_all();
-
-	/* Wait for the others */
-	while (atomic_read(&data.count))
-		cpu_relax();
-
-	atomic_set(&data.count, num_booting_cpus() - 1);
-	smp_wmb();
-	atomic_set(&data.gate, 1);
-
-	/*
-	 * Wait here for everyone to have seen the gate change
-	 * So we're the last ones to touch 'data'
-	 */
-	while (atomic_read(&data.count))
-		cpu_relax();
-
-	local_irq_restore(flags);
-	preempt_enable();
-#ifdef CONFIG_SMP
-	if (cpu_active(raw_smp_processor_id()))
-		mutex_unlock(&stop_cpus_mutex);
-#endif
+	struct set_mtrr_data data = { .smp_reg = reg,
+				      .smp_base = base,
+				      .smp_size = size,
+				      .smp_type = type
+				    };
+
+	stop_machine(mtrr_rendezvous_handler, &data, cpu_online_mask);
+}
+
+static void set_mtrr_from_inactive_cpu(unsigned int reg, unsigned long base,
+				       unsigned long size, mtrr_type type)
+{
+	struct set_mtrr_data data = { .smp_reg = reg,
+				      .smp_base = base,
+				      .smp_size = size,
+				      .smp_type = type
+				    };
+
+	stop_machine_from_inactive_cpu(mtrr_rendezvous_handler, &data,
+				       cpu_callout_mask);
 }
 
 /**
@@ -806,7 +696,7 @@ void mtrr_ap_init(void)
  * 2. cpu hotadd time. We let mtrr_add/del_page hold cpuhotplug
  *    lock to prevent mtrr entry changes
  */
-	set_mtrr(~0U, 0, 0, 0);
+	set_mtrr_from_inactive_cpu(~0U, 0, 0, 0);
 }
 
 /**
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index e0f2da25d751..4a9d0c7edc65 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -27,8 +27,6 @@ struct cpu_stop_work {
 	struct cpu_stop_done	*done;
 };
 
-extern struct mutex stop_cpus_mutex;
-
 int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg);
 void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
 			 struct cpu_stop_work *work_buf);
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index e8f05b14cd43..c1124752e1d3 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -132,8 +132,8 @@ void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
 	cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), work_buf);
 }
 
-DEFINE_MUTEX(stop_cpus_mutex);
 /* static data for stop_cpus */
+static DEFINE_MUTEX(stop_cpus_mutex);
 static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work);
 
 static void queue_stop_cpus_work(const struct cpumask *cpumask,
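
For readers unfamiliar with the stop_machine() interface that this patch switches to, the sketch below shows the calling pattern in isolation: the handler is run on every CPU in the given mask with interrupts disabled, and the call returns only once all CPUs have finished, which is the lockstep behaviour the removed open-coded count/gate machinery implemented by hand. This is a minimal illustration, not part of the patch; all demo_* identifiers are made up, while stop_machine() and cpu_online_mask are the real interfaces used above.

/*
 * Minimal sketch of a stop_machine() rendezvous (hypothetical demo_* names).
 */
#include <linux/stop_machine.h>
#include <linux/cpumask.h>

struct demo_data {
	unsigned long val;
};

/* Runs on every CPU in the mask, in lockstep, with interrupts disabled. */
static int demo_rendezvous_handler(void *info)
{
	struct demo_data *d = info;

	/* Per-CPU hardware/state update would go here, using d->val. */
	(void)d;
	return 0;
}

static void demo_update_all_cpus(unsigned long val)
{
	struct demo_data d = { .val = val };

	/* Returns only after every online CPU has run the handler. */
	stop_machine(demo_rendezvous_handler, &d, cpu_online_mask);
}

The cpu-online path cannot use plain stop_machine(), since the incoming CPU is not yet online and cannot afford to block, which is why mtrr_ap_init() is routed through set_mtrr_from_inactive_cpu() and the stop_machine_from_inactive_cpu() variant with cpu_callout_mask instead.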