diff options
Diffstat (limited to 'arch/x86/kernel/alternative.c')
-rw-r--r-- | arch/x86/kernel/alternative.c | 40 |
1 file changed, 27 insertions, 13 deletions
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index d6405e0842b5..45d79ea890ae 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c | |||
@@ -273,6 +273,7 @@ struct smp_alt_module { | |||
273 | }; | 273 | }; |
274 | static LIST_HEAD(smp_alt_modules); | 274 | static LIST_HEAD(smp_alt_modules); |
275 | static DEFINE_SPINLOCK(smp_alt); | 275 | static DEFINE_SPINLOCK(smp_alt); |
276 | static int smp_mode = 1; /* protected by smp_alt */ | ||
276 | 277 | ||
277 | void alternatives_smp_module_add(struct module *mod, char *name, | 278 | void alternatives_smp_module_add(struct module *mod, char *name, |
278 | void *locks, void *locks_end, | 279 | void *locks, void *locks_end, |
@@ -341,12 +342,13 @@ void alternatives_smp_switch(int smp) | |||
341 | 342 | ||
342 | #ifdef CONFIG_LOCKDEP | 343 | #ifdef CONFIG_LOCKDEP |
343 | /* | 344 | /* |
344 | * A not yet fixed binutils section handling bug prevents | 345 | * Older binutils section handling bug prevented |
345 | * alternatives-replacement from working reliably, so turn | 346 | * alternatives-replacement from working reliably. |
346 | * it off: | 347 | * |
348 | * If this still occurs then you should see a hang | ||
349 | * or crash shortly after this line: | ||
347 | */ | 350 | */ |
348 | printk("lockdep: not fixing up alternatives.\n"); | 351 | printk("lockdep: fixing up alternatives.\n"); |
349 | return; | ||
350 | #endif | 352 | #endif |
351 | 353 | ||
352 | if (noreplace_smp || smp_alt_once) | 354 | if (noreplace_smp || smp_alt_once) |
@@ -354,21 +356,29 @@ void alternatives_smp_switch(int smp) | |||
354 | BUG_ON(!smp && (num_online_cpus() > 1)); | 356 | BUG_ON(!smp && (num_online_cpus() > 1)); |
355 | 357 | ||
356 | spin_lock_irqsave(&smp_alt, flags); | 358 | spin_lock_irqsave(&smp_alt, flags); |
357 | if (smp) { | 359 | |
360 | /* | ||
361 | * Avoid unnecessary switches because it forces JIT based VMs to | ||
362 | * throw away all cached translations, which can be quite costly. | ||
363 | */ | ||
364 | if (smp == smp_mode) { | ||
365 | /* nothing */ | ||
366 | } else if (smp) { | ||
358 | printk(KERN_INFO "SMP alternatives: switching to SMP code\n"); | 367 | printk(KERN_INFO "SMP alternatives: switching to SMP code\n"); |
359 | clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability); | 368 | clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP); |
360 | clear_bit(X86_FEATURE_UP, cpu_data(0).x86_capability); | 369 | clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP); |
361 | list_for_each_entry(mod, &smp_alt_modules, next) | 370 | list_for_each_entry(mod, &smp_alt_modules, next) |
362 | alternatives_smp_lock(mod->locks, mod->locks_end, | 371 | alternatives_smp_lock(mod->locks, mod->locks_end, |
363 | mod->text, mod->text_end); | 372 | mod->text, mod->text_end); |
364 | } else { | 373 | } else { |
365 | printk(KERN_INFO "SMP alternatives: switching to UP code\n"); | 374 | printk(KERN_INFO "SMP alternatives: switching to UP code\n"); |
366 | set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability); | 375 | set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP); |
367 | set_bit(X86_FEATURE_UP, cpu_data(0).x86_capability); | 376 | set_cpu_cap(&cpu_data(0), X86_FEATURE_UP); |
368 | list_for_each_entry(mod, &smp_alt_modules, next) | 377 | list_for_each_entry(mod, &smp_alt_modules, next) |
369 | alternatives_smp_unlock(mod->locks, mod->locks_end, | 378 | alternatives_smp_unlock(mod->locks, mod->locks_end, |
370 | mod->text, mod->text_end); | 379 | mod->text, mod->text_end); |
371 | } | 380 | } |
381 | smp_mode = smp; | ||
372 | spin_unlock_irqrestore(&smp_alt, flags); | 382 | spin_unlock_irqrestore(&smp_alt, flags); |
373 | } | 383 | } |
374 | 384 | ||
@@ -431,8 +441,9 @@ void __init alternative_instructions(void) | |||
431 | if (smp_alt_once) { | 441 | if (smp_alt_once) { |
432 | if (1 == num_possible_cpus()) { | 442 | if (1 == num_possible_cpus()) { |
433 | printk(KERN_INFO "SMP alternatives: switching to UP code\n"); | 443 | printk(KERN_INFO "SMP alternatives: switching to UP code\n"); |
434 | set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability); | 444 | set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP); |
435 | set_bit(X86_FEATURE_UP, cpu_data(0).x86_capability); | 445 | set_cpu_cap(&cpu_data(0), X86_FEATURE_UP); |
446 | |||
436 | alternatives_smp_unlock(__smp_locks, __smp_locks_end, | 447 | alternatives_smp_unlock(__smp_locks, __smp_locks_end, |
437 | _text, _etext); | 448 | _text, _etext); |
438 | } | 449 | } |
@@ -440,7 +451,10 @@ void __init alternative_instructions(void) | |||
440 | alternatives_smp_module_add(NULL, "core kernel", | 451 | alternatives_smp_module_add(NULL, "core kernel", |
441 | __smp_locks, __smp_locks_end, | 452 | __smp_locks, __smp_locks_end, |
442 | _text, _etext); | 453 | _text, _etext); |
443 | alternatives_smp_switch(0); | 454 | |
455 | /* Only switch to UP mode if we don't immediately boot others */ | ||
456 | if (num_possible_cpus() == 1 || setup_max_cpus <= 1) | ||
457 | alternatives_smp_switch(0); | ||
444 | } | 458 | } |
445 | #endif | 459 | #endif |
446 | apply_paravirt(__parainstructions, __parainstructions_end); | 460 | apply_paravirt(__parainstructions, __parainstructions_end); |