author		Linus Torvalds <torvalds@linux-foundation.org>	2012-10-01 13:46:27 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-01 13:46:27 -0400
commit		da8347969f324db5f572581397d9b3a8e108cda4 (patch)
tree		7df2ea8968ecb92e307bbffdbe8f9bcd0c79c36a
parent		80749df4a1492004fdb7bd2cec094b92260c6d27 (diff)
parent		c416ddf5b909736f5b57d348f5de159693e699ad (diff)
Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86/asm changes from Ingo Molnar:
 "The one change that stands out is the alternatives patching change
  that prevents us from ever patching back instructions from SMP to UP:
  this simplifies things and speeds up CPU hotplug.

  Other than that it's smaller fixes, cleanups and improvements."

* 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86: Unspaghettize do_trap()
  x86_64: Work around old GAS bug
  x86: Use REP BSF unconditionally
  x86: Prefer TZCNT over BSF
  x86/64: Adjust types of temporaries used by ffs()/fls()/fls64()
  x86: Drop unnecessary kernel_eflags variable on 64-bit
  x86/smp: Don't ever patch back to UP if we unplug cpus
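The "REP BSF" commits rely on an encoding property rather than on any new API: TZCNT is encoded as BSF with an F3 (REP) prefix, and CPUs without BMI1 ignore the stray prefix and execute plain BSF. Since __ffs() and ffz() are only used with a nonzero operand, where BSF and TZCNT return the same result, the prefixed form is safe to emit unconditionally. A minimal user-space sketch of the idiom (illustrative only, not part of the patch; my_ffs is a hypothetical stand-in for the kernel's __ffs):

#include <stdio.h>

/* Runs as TZCNT on BMI1-capable CPUs and as plain BSF elsewhere;
 * for a nonzero word both yield the index of the lowest set bit. */
static unsigned long my_ffs(unsigned long word)
{
	asm("rep; bsf %1,%0"
	    : "=r" (word)
	    : "rm" (word));
	return word;
}

int main(void)
{
	printf("%lu\n", my_ffs(0x28));	/* 0x28 = 0b101000: prints 3 */
	return 0;
}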
-rw-r--r--	Documentation/kernel-parameters.txt	3
-rw-r--r--	arch/x86/include/asm/alternative.h	4
-rw-r--r--	arch/x86/include/asm/bitops.h	14
-rw-r--r--	arch/x86/include/asm/calling.h	48
-rw-r--r--	arch/x86/include/asm/processor.h	1
-rw-r--r--	arch/x86/kernel/alternative.c	107
-rw-r--r--	arch/x86/kernel/cpu/common.c	4
-rw-r--r--	arch/x86/kernel/entry_64.S	22
-rw-r--r--	arch/x86/kernel/smpboot.c	20
-rw-r--r--	arch/x86/kernel/traps.c	60
-rw-r--r--	arch/x86/xen/smp.c	6
-rw-r--r--	kernel/cpu.c	11
12 files changed, 101 insertions(+), 199 deletions(-)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 55ada0471f93..d1cda8d892aa 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2649,9 +2649,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 	smart2=		[HW]
 			Format: <io1>[,<io2>[,...,<io8>]]
 
-	smp-alt-once	[X86-32,SMP] On a hotplug CPU system, only
-			attempt to substitute SMP alternatives once at boot.
-
 	smsc-ircc2.nopnp	[HW] Don't use PNP to discover SMC devices
 	smsc-ircc2.ircc_cfg=	[HW] Device configuration I/O port
 	smsc-ircc2.ircc_sir=	[HW] SIR base I/O port
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 70780689599a..444704c8e186 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -60,7 +60,7 @@ extern void alternatives_smp_module_add(struct module *mod, char *name,
 			void *locks, void *locks_end,
 			void *text, void *text_end);
 extern void alternatives_smp_module_del(struct module *mod);
-extern void alternatives_smp_switch(int smp);
+extern void alternatives_enable_smp(void);
 extern int alternatives_text_reserved(void *start, void *end);
 extern bool skip_smp_alternatives;
 #else
@@ -68,7 +68,7 @@ static inline void alternatives_smp_module_add(struct module *mod, char *name,
 					      void *locks, void *locks_end,
 					      void *text, void *text_end) {}
 static inline void alternatives_smp_module_del(struct module *mod) {}
-static inline void alternatives_smp_switch(int smp) {}
+static inline void alternatives_enable_smp(void) {}
 static inline int alternatives_text_reserved(void *start, void *end)
 {
 	return 0;
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 72f5009deb5a..6dfd0195bb55 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -355,7 +355,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
  */
 static inline unsigned long __ffs(unsigned long word)
 {
-	asm("bsf %1,%0"
+	asm("rep; bsf %1,%0"
 		: "=r" (word)
 		: "rm" (word));
 	return word;
@@ -369,7 +369,7 @@ static inline unsigned long __ffs(unsigned long word)
  */
 static inline unsigned long ffz(unsigned long word)
 {
-	asm("bsf %1,%0"
+	asm("rep; bsf %1,%0"
 		: "=r" (word)
 		: "r" (~word));
 	return word;
@@ -417,10 +417,9 @@ static inline int ffs(int x)
 	 * We cannot do this on 32 bits because at the very least some
 	 * 486 CPUs did not behave this way.
 	 */
-	long tmp = -1;
 	asm("bsfl %1,%0"
 	    : "=r" (r)
-	    : "rm" (x), "0" (tmp));
+	    : "rm" (x), "0" (-1));
 #elif defined(CONFIG_X86_CMOV)
 	asm("bsfl %1,%0\n\t"
 	    "cmovzl %2,%0"
@@ -459,10 +458,9 @@ static inline int fls(int x)
 	 * We cannot do this on 32 bits because at the very least some
 	 * 486 CPUs did not behave this way.
 	 */
-	long tmp = -1;
 	asm("bsrl %1,%0"
 	    : "=r" (r)
-	    : "rm" (x), "0" (tmp));
+	    : "rm" (x), "0" (-1));
 #elif defined(CONFIG_X86_CMOV)
 	asm("bsrl %1,%0\n\t"
 	    "cmovzl %2,%0"
@@ -490,13 +488,13 @@ static inline int fls(int x)
 #ifdef CONFIG_X86_64
 static __always_inline int fls64(__u64 x)
 {
-	long bitpos = -1;
+	int bitpos = -1;
 	/*
 	 * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the
 	 * dest reg is undefined if x==0, but their CPU architect says its
 	 * value is written to set it to the same as before.
 	 */
-	asm("bsrq %1,%0"
+	asm("bsrq %1,%q0"
 	    : "+r" (bitpos)
 	    : "rm" (x));
 	return bitpos + 1;
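The fls64() hunk above depends on BSR leaving its destination register unmodified when the source is zero (documented by AMD, and informally confirmed for Intel parts, per the comment), so preloading the destination with -1 makes fls64(0) return 0 without a branch; the %q0 modifier keeps the instruction 64-bit while the temporary shrinks to a plain int. A hedged stand-alone rendition of that contract (my_fls64 is an illustrative name; x86-64 only):

#include <stdio.h>

static int my_fls64(unsigned long long x)
{
	int bitpos = -1;
	/* For x == 0, BSR leaves %q0 untouched, so bitpos stays -1
	 * and the function returns 0; otherwise it returns 1..64. */
	asm("bsrq %1,%q0"
	    : "+r" (bitpos)
	    : "rm" (x));
	return bitpos + 1;
}

int main(void)
{
	printf("%d %d %d\n", my_fls64(0), my_fls64(1),
	       my_fls64(0x8000000000000000ULL));	/* prints 0 1 64 */
	return 0;
}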
diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
index a9e3a740f697..7f8422a28a46 100644
--- a/arch/x86/include/asm/calling.h
+++ b/arch/x86/include/asm/calling.h
@@ -49,38 +49,36 @@ For 32-bit we have the following conventions - kernel is built with
 #include "dwarf2.h"
 
 /*
- * 64-bit system call stack frame layout defines and helpers, for
- * assembly code (note that the seemingly unnecessary parentheses
- * are to prevent cpp from inserting spaces in expressions that get
- * passed to macros):
+ * 64-bit system call stack frame layout defines and helpers,
+ * for assembly code:
  */
 
-#define R15	  (0)
-#define R14	  (8)
-#define R13	 (16)
-#define R12	 (24)
-#define RBP	 (32)
-#define RBX	 (40)
+#define R15	  0
+#define R14	  8
+#define R13	 16
+#define R12	 24
+#define RBP	 32
+#define RBX	 40
 
 /* arguments: interrupts/non tracing syscalls only save up to here: */
-#define R11	 (48)
-#define R10	 (56)
-#define R9	 (64)
-#define R8	 (72)
-#define RAX	 (80)
-#define RCX	 (88)
-#define RDX	 (96)
-#define RSI	(104)
-#define RDI	(112)
-#define ORIG_RAX	(120)	/* + error_code */
+#define R11	 48
+#define R10	 56
+#define R9	 64
+#define R8	 72
+#define RAX	 80
+#define RCX	 88
+#define RDX	 96
+#define RSI	104
+#define RDI	112
+#define ORIG_RAX	120	/* + error_code */
 /* end of arguments */
 
 /* cpu exception frame or undefined in case of fast syscall: */
-#define RIP	(128)
-#define CS	(136)
-#define EFLAGS	(144)
-#define RSP	(152)
-#define SS	(160)
+#define RIP	128
+#define CS	136
+#define EFLAGS	144
+#define RSP	152
+#define SS	160
 
 #define ARGOFFSET	R11
 #define SWFRAME		ORIG_RAX
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 433d2e5c98a7..b98c0d958ebb 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -423,7 +423,6 @@ DECLARE_INIT_PER_CPU(irq_stack_union);
 
 DECLARE_PER_CPU(char *, irq_stack_ptr);
 DECLARE_PER_CPU(unsigned int, irq_count);
-extern unsigned long kernel_eflags;
 extern asmlinkage void ignore_sysret(void);
 #else	/* X86_64 */
 #ifdef CONFIG_CC_STACKPROTECTOR
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index ced4534baed5..357475a87b52 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -23,19 +23,6 @@
 
 #define MAX_PATCH_LEN (255-1)
 
-#ifdef CONFIG_HOTPLUG_CPU
-static int smp_alt_once;
-
-static int __init bootonly(char *str)
-{
-	smp_alt_once = 1;
-	return 1;
-}
-__setup("smp-alt-boot", bootonly);
-#else
-#define smp_alt_once 1
-#endif
-
 static int __initdata_or_module debug_alternative;
 
 static int __init debug_alt(char *str)
@@ -326,9 +313,6 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
 {
 	const s32 *poff;
 
-	if (noreplace_smp)
-		return;
-
 	mutex_lock(&text_mutex);
 	for (poff = start; poff < end; poff++) {
 		u8 *ptr = (u8 *)poff + *poff;
@@ -359,7 +343,7 @@ struct smp_alt_module {
 };
 static LIST_HEAD(smp_alt_modules);
 static DEFINE_MUTEX(smp_alt);
-static int smp_mode = 1;	/* protected by smp_alt */
+static bool uniproc_patched = false;	/* protected by smp_alt */
 
 void __init_or_module alternatives_smp_module_add(struct module *mod,
 						  char *name,
@@ -368,19 +352,18 @@ void __init_or_module alternatives_smp_module_add(struct module *mod,
 {
 	struct smp_alt_module *smp;
 
-	if (noreplace_smp)
-		return;
+	mutex_lock(&smp_alt);
+	if (!uniproc_patched)
+		goto unlock;
 
-	if (smp_alt_once) {
-		if (boot_cpu_has(X86_FEATURE_UP))
-			alternatives_smp_unlock(locks, locks_end,
-						text, text_end);
-		return;
-	}
+	if (num_possible_cpus() == 1)
+		/* Don't bother remembering, we'll never have to undo it. */
+		goto smp_unlock;
 
 	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
 	if (NULL == smp)
-		return; /* we'll run the (safe but slow) SMP code then ... */
+		/* we'll run the (safe but slow) SMP code then ... */
+		goto unlock;
 
 	smp->mod	= mod;
 	smp->name	= name;
@@ -392,11 +375,10 @@ void __init_or_module alternatives_smp_module_add(struct module *mod,
 		__func__, smp->locks, smp->locks_end,
 		smp->text, smp->text_end, smp->name);
 
-	mutex_lock(&smp_alt);
 	list_add_tail(&smp->next, &smp_alt_modules);
-	if (boot_cpu_has(X86_FEATURE_UP))
-		alternatives_smp_unlock(smp->locks, smp->locks_end,
-					smp->text, smp->text_end);
+smp_unlock:
+	alternatives_smp_unlock(locks, locks_end, text, text_end);
+unlock:
 	mutex_unlock(&smp_alt);
 }
 
@@ -404,24 +386,18 @@ void __init_or_module alternatives_smp_module_del(struct module *mod)
 {
 	struct smp_alt_module *item;
 
-	if (smp_alt_once || noreplace_smp)
-		return;
-
 	mutex_lock(&smp_alt);
 	list_for_each_entry(item, &smp_alt_modules, next) {
 		if (mod != item->mod)
 			continue;
 		list_del(&item->next);
-		mutex_unlock(&smp_alt);
-		DPRINTK("%s: %s\n", __func__, item->name);
 		kfree(item);
-		return;
+		break;
 	}
 	mutex_unlock(&smp_alt);
 }
 
-bool skip_smp_alternatives;
-void alternatives_smp_switch(int smp)
+void alternatives_enable_smp(void)
 {
 	struct smp_alt_module *mod;
 
@@ -436,34 +412,21 @@ void alternatives_smp_switch(int smp)
 	pr_info("lockdep: fixing up alternatives\n");
 #endif
 
-	if (noreplace_smp || smp_alt_once || skip_smp_alternatives)
-		return;
-	BUG_ON(!smp && (num_online_cpus() > 1));
+	/* Why bother if there are no other CPUs? */
+	BUG_ON(num_possible_cpus() == 1);
 
 	mutex_lock(&smp_alt);
 
-	/*
-	 * Avoid unnecessary switches because it forces JIT based VMs to
-	 * throw away all cached translations, which can be quite costly.
-	 */
-	if (smp == smp_mode) {
-		/* nothing */
-	} else if (smp) {
+	if (uniproc_patched) {
 		pr_info("switching to SMP code\n");
+		BUG_ON(num_online_cpus() != 1);
 		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
 		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
 		list_for_each_entry(mod, &smp_alt_modules, next)
 			alternatives_smp_lock(mod->locks, mod->locks_end,
 					      mod->text, mod->text_end);
-	} else {
-		pr_info("switching to UP code\n");
-		set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
-		set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
-		list_for_each_entry(mod, &smp_alt_modules, next)
-			alternatives_smp_unlock(mod->locks, mod->locks_end,
-						mod->text, mod->text_end);
+		uniproc_patched = false;
 	}
-	smp_mode = smp;
 	mutex_unlock(&smp_alt);
 }
 
@@ -540,40 +503,22 @@ void __init alternative_instructions(void)
 
 	apply_alternatives(__alt_instructions, __alt_instructions_end);
 
-	/* switch to patch-once-at-boottime-only mode and free the
-	 * tables in case we know the number of CPUs will never ever
-	 * change */
-#ifdef CONFIG_HOTPLUG_CPU
-	if (num_possible_cpus() < 2)
-		smp_alt_once = 1;
-#endif
-
 #ifdef CONFIG_SMP
-	if (smp_alt_once) {
-		if (1 == num_possible_cpus()) {
-			pr_info("switching to UP code\n");
-			set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
-			set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
-
-			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
-						_text, _etext);
-		}
-	} else {
+	/* Patch to UP if other cpus not imminent. */
+	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
+		uniproc_patched = true;
 		alternatives_smp_module_add(NULL, "core kernel",
 					    __smp_locks, __smp_locks_end,
 					    _text, _etext);
-
-		/* Only switch to UP mode if we don't immediately boot others */
-		if (num_present_cpus() == 1 || setup_max_cpus <= 1)
-			alternatives_smp_switch(0);
 	}
-#endif
-	apply_paravirt(__parainstructions, __parainstructions_end);
 
-	if (smp_alt_once)
+	if (!uniproc_patched || num_possible_cpus() == 1)
 		free_init_pages("SMP alternatives",
 				(unsigned long)__smp_locks,
 				(unsigned long)__smp_locks_end);
+#endif
+
+	apply_paravirt(__parainstructions, __parainstructions_end);
 
 	restart_nmi();
 }
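Net effect of the alternative.c changes: the old bidirectional smp_mode switch becomes a single flag that only ever moves from UP-patched to SMP-patched. A condensed, runnable model of that one-way life cycle (the helper body is a stub standing in for the real text-patching loop; none of these names are kernel APIs):

#include <stdbool.h>
#include <stdio.h>

static bool uniproc_patched;		/* true = text is UP-patched */

static void relock_all_modules(void)	/* stub for the alternatives_smp_lock() loop */
{
	puts("switching to SMP code");
}

static void enable_smp_model(void)	/* models alternatives_enable_smp() */
{
	if (uniproc_patched) {
		relock_all_modules();
		uniproc_patched = false;	/* one-way: never back to UP */
	}
}

int main(void)
{
	uniproc_patched = true;	/* booted with one present CPU */
	enable_smp_model();	/* first CPU hotplug: patches text once */
	enable_smp_model();	/* every later plug/unplug: no-op */
	return 0;
}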
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index a5fbc3c5fccc..9961e2e23709 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1116,8 +1116,6 @@ void syscall_init(void)
 		       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_IOPL);
 }
 
-unsigned long kernel_eflags;
-
 /*
  * Copies of the original ist values from the tss are only accessed during
  * debugging, no special alignment required.
@@ -1299,8 +1297,6 @@ void __cpuinit cpu_init(void)
 	fpu_init();
 	xsave_init();
 
-	raw_local_save_flags(kernel_eflags);
-
 	if (is_uv_system())
 		uv_cpu_init();
 }
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 4f0322e4ecee..066334be7b74 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -445,15 +445,15 @@ ENDPROC(native_usergs_sysret64)
 	.macro SAVE_ARGS_IRQ
 	cld
 	/* start from rbp in pt_regs and jump over */
-	movq_cfi rdi, RDI-RBP
-	movq_cfi rsi, RSI-RBP
-	movq_cfi rdx, RDX-RBP
-	movq_cfi rcx, RCX-RBP
-	movq_cfi rax, RAX-RBP
-	movq_cfi  r8,  R8-RBP
-	movq_cfi  r9,  R9-RBP
-	movq_cfi r10, R10-RBP
-	movq_cfi r11, R11-RBP
+	movq_cfi rdi, (RDI-RBP)
+	movq_cfi rsi, (RSI-RBP)
+	movq_cfi rdx, (RDX-RBP)
+	movq_cfi rcx, (RCX-RBP)
+	movq_cfi rax, (RAX-RBP)
+	movq_cfi  r8,  (R8-RBP)
+	movq_cfi  r9,  (R9-RBP)
+	movq_cfi r10, (R10-RBP)
+	movq_cfi r11, (R11-RBP)
 
 	/* Save rbp so that we can unwind from get_irq_regs() */
 	movq_cfi rbp, 0
@@ -487,7 +487,7 @@ ENDPROC(native_usergs_sysret64)
 	.endm
 
 ENTRY(save_rest)
-	PARTIAL_FRAME 1 REST_SKIP+8
+	PARTIAL_FRAME 1 (REST_SKIP+8)
 	movq 5*8+16(%rsp), %r11	/* save return address */
 	movq_cfi rbx, RBX+16
 	movq_cfi rbp, RBP+16
@@ -543,7 +543,7 @@ ENTRY(ret_from_fork)
 
 	LOCK ; btr $TIF_FORK,TI_flags(%r8)
 
-	pushq_cfi kernel_eflags(%rip)
+	pushq_cfi $0x0002
 	popfq_cfi				# reset kernel eflags
 
 	call schedule_tail			# rdi: 'prev' task parameter
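The $0x0002 in ret_from_fork is the minimal legal EFLAGS image: bit 1 is the only reserved-as-one bit, and every software-visible flag (TF, IF, DF, ...) comes up cleared, which matches what the boot-time kernel_eflags snapshot contained in practice (cpu_init() ran with interrupts off); hence the variable could be dropped. A small self-check of that bit layout (illustrative only, not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned long eflags = 0x0002;	/* the image pushed before popfq */

	printf("bit 1 (reserved, must be 1): %lu\n", (eflags >> 1) & 1);
	printf("bit 8 (TF, trace):           %lu\n", (eflags >> 8) & 1);
	printf("bit 9 (IF, interrupts):      %lu\n", (eflags >> 9) & 1);
	printf("bit 10 (DF, direction):      %lu\n", (eflags >> 10) & 1);
	return 0;
}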
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 7c5a8c314c02..c80a33bc528b 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -665,7 +665,8 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
 	unsigned long boot_error = 0;
 	int timeout;
 
-	alternatives_smp_switch(1);
+	/* Just in case we booted with a single CPU. */
+	alternatives_enable_smp();
 
 	idle->thread.sp = (unsigned long) (((struct pt_regs *)
 			  (THREAD_SIZE + task_stack_page(idle))) - 1);
@@ -1053,20 +1054,6 @@ out:
 	preempt_enable();
 }
 
-void arch_disable_nonboot_cpus_begin(void)
-{
-	/*
-	 * Avoid the smp alternatives switch during the disable_nonboot_cpus().
-	 * In the suspend path, we will be back in the SMP mode shortly anyways.
-	 */
-	skip_smp_alternatives = true;
-}
-
-void arch_disable_nonboot_cpus_end(void)
-{
-	skip_smp_alternatives = false;
-}
-
 void arch_enable_nonboot_cpus_begin(void)
 {
 	set_mtrr_aps_delayed_init();
@@ -1256,9 +1243,6 @@ void native_cpu_die(unsigned int cpu)
 		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
 			if (system_state == SYSTEM_RUNNING)
 				pr_info("CPU %u is now offline\n", cpu);
-
-			if (1 == num_online_cpus())
-				alternatives_smp_switch(0);
 			return;
 		}
 		msleep(100);
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 378967578f22..cfbe3fc41586 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -108,30 +108,45 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
 	dec_preempt_count();
 }
 
-static void __kprobes
-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
-	long error_code, siginfo_t *info)
+static int __kprobes
+do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
+		  struct pt_regs *regs, long error_code)
 {
-	struct task_struct *tsk = current;
-
 #ifdef CONFIG_X86_32
 	if (regs->flags & X86_VM_MASK) {
 		/*
-		 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
+		 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
 		 * On nmi (interrupt 2), do_trap should not be called.
 		 */
-		if (trapnr < X86_TRAP_UD)
-			goto vm86_trap;
-		goto trap_signal;
+		if (trapnr < X86_TRAP_UD) {
+			if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
+						error_code, trapnr))
+				return 0;
+		}
+		return -1;
 	}
 #endif
+	if (!user_mode(regs)) {
+		if (!fixup_exception(regs)) {
+			tsk->thread.error_code = error_code;
+			tsk->thread.trap_nr = trapnr;
+			die(str, regs, error_code);
+		}
+		return 0;
+	}
 
-	if (!user_mode(regs))
-		goto kernel_trap;
+	return -1;
+}
 
-#ifdef CONFIG_X86_32
-trap_signal:
-#endif
+static void __kprobes
+do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
+	long error_code, siginfo_t *info)
+{
+	struct task_struct *tsk = current;
+
+
+	if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
+		return;
 	/*
 	 * We want error_code and trap_nr set for userspace faults and
 	 * kernelspace faults which result in die(), but not
@@ -159,23 +174,6 @@ trap_signal:
 		force_sig_info(signr, info, tsk);
 	else
 		force_sig(signr, tsk);
-	return;
-
-kernel_trap:
-	if (!fixup_exception(regs)) {
-		tsk->thread.error_code = error_code;
-		tsk->thread.trap_nr = trapnr;
-		die(str, regs, error_code);
-	}
-	return;
-
-#ifdef CONFIG_X86_32
-vm86_trap:
-	if (handle_vm86_trap((struct kernel_vm86_regs *) regs,
-						error_code, trapnr))
-		goto trap_signal;
-	return;
-#endif
 }
 
 #define DO_ERROR(trapnr, signr, str, name)				\
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index f58dca7a6e52..353c50f18702 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -377,7 +377,8 @@ static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
 		return rc;
 
 	if (num_online_cpus() == 1)
-		alternatives_smp_switch(1);
+		/* Just in case we booted with a single CPU. */
+		alternatives_enable_smp();
 
 	rc = xen_smp_intr_init(cpu);
 	if (rc)
@@ -424,9 +425,6 @@ static void xen_cpu_die(unsigned int cpu)
 	unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
 	xen_uninit_lock_cpu(cpu);
 	xen_teardown_timer(cpu);
-
-	if (num_online_cpus() == 1)
-		alternatives_smp_switch(0);
 }
 
 static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */
diff --git a/kernel/cpu.c b/kernel/cpu.c
index e615dfbcf794..f560598807c1 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -447,14 +447,6 @@ EXPORT_SYMBOL_GPL(cpu_up);
 #ifdef CONFIG_PM_SLEEP_SMP
 static cpumask_var_t frozen_cpus;
 
-void __weak arch_disable_nonboot_cpus_begin(void)
-{
-}
-
-void __weak arch_disable_nonboot_cpus_end(void)
-{
-}
-
 int disable_nonboot_cpus(void)
 {
 	int cpu, first_cpu, error = 0;
@@ -466,7 +458,6 @@ int disable_nonboot_cpus(void)
 	 * with the userspace trying to use the CPU hotplug at the same time
 	 */
 	cpumask_clear(frozen_cpus);
-	arch_disable_nonboot_cpus_begin();
 
 	printk("Disabling non-boot CPUs ...\n");
 	for_each_online_cpu(cpu) {
@@ -482,8 +473,6 @@ int disable_nonboot_cpus(void)
 		}
 	}
 
-	arch_disable_nonboot_cpus_end();
-
 	if (!error) {
 		BUG_ON(num_online_cpus() > 1);
 		/* Make sure the CPUs won't be enabled by someone else */