author     Li Shaohua <shaohua.li@intel.com>           2005-06-25 17:54:53 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>   2005-06-25 19:24:29 -0400
commit     6fe940d6c300886de4ff1454d8ffd363172af433
tree       58c34aed66a85ff72bdba1d5e3a3e3c967621a04
parent     67664c8f7e74def5adf66298a1245d82af72db2c
[PATCH] sep initializing rework
Make SEP init per-cpu, so it is hotplug safe.
Signed-off-by: Li Shaohua<shaohua.li@intel.com>
Signed-off-by: Ashok Raj <ashok.raj@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
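
In outline, the rework replaces the old one-shot __initcall, which broadcast enable_sep_cpu() to the CPUs online at boot via on_each_cpu(), with a call from identify_cpu(), which every CPU runs as it is brought up; a CPU hotplugged later therefore programs its own SYSENTER MSRs as well. A condensed sketch of the resulting flow (abbreviated from the diff below; the remaining MSR writes and the vsyscall page setup are elided):

void enable_sep_cpu(void)                       /* was: enable_sep_cpu(void *info) */
{
        int cpu = get_cpu();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);

        if (!boot_cpu_has(X86_FEATURE_SEP)) {   /* CPU has no SYSENTER support */
                put_cpu();
                return;
        }

        tss->ss1 = __KERNEL_CS;
        tss->esp1 = sizeof(struct tss_struct) + (unsigned long) tss;
        wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
        /* ... the remaining SYSENTER MSRs are programmed the same way ... */
        put_cpu();
}

void __init identify_cpu(struct cpuinfo_x86 *c)
{
        /* ... CPU feature detection ... */
        if (c == &boot_cpu_data)
                sysenter_setup();       /* build the vsyscall page exactly once */
        enable_sep_cpu();               /* per-CPU MSR setup, hence hotplug safe */
}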
 arch/i386/kernel/cpu/common.c |  3 +++
 arch/i386/kernel/smp.c        | 10 ++++++++++
 arch/i386/kernel/smpboot.c    | 11 +++++++++++
 arch/i386/kernel/sysenter.c   | 12 +++++++-----
 arch/i386/power/cpu.c         |  6 +++---
 include/asm-i386/processor.h  |  2 ++
 include/asm-i386/smp.h        |  2 ++
 7 files changed, 38 insertions(+), 8 deletions(-)
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index b9954248d0aa..d58e169fbdbb 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -432,6 +432,9 @@ void __init identify_cpu(struct cpuinfo_x86 *c)
 #ifdef CONFIG_X86_MCE
 	mcheck_init(c);
 #endif
+	if (c == &boot_cpu_data)
+		sysenter_setup();
+	enable_sep_cpu();
 }
 
 #ifdef CONFIG_X86_HT
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index 35f521612b20..cec4bde67161 100644
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -495,6 +495,16 @@ struct call_data_struct {
 	int wait;
 };
 
+void lock_ipi_call_lock(void)
+{
+	spin_lock_irq(&call_lock);
+}
+
+void unlock_ipi_call_lock(void)
+{
+	spin_unlock_irq(&call_lock);
+}
+
 static struct call_data_struct * call_data;
 
 /*
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index ad74a46e9ef0..c5517f332309 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -449,7 +449,18 @@ static void __init start_secondary(void *unused)
 	 * the local TLBs too.
 	 */
 	local_flush_tlb();
+
+	/*
+	 * We need to hold call_lock, so there is no inconsistency
+	 * between the time smp_call_function() determines number of
+	 * IPI receipients, and the time when the determination is made
+	 * for which cpus receive the IPI. Holding this
+	 * lock helps us to not include this cpu in a currently in progress
+	 * smp_call_function().
+	 */
+	lock_ipi_call_lock();
 	cpu_set(smp_processor_id(), cpu_online_map);
+	unlock_ipi_call_lock();
 
 	/* We can take interrupts now: we're officially "up". */
 	local_irq_enable();
diff --git a/arch/i386/kernel/sysenter.c b/arch/i386/kernel/sysenter.c
index 960d8bd137d0..0bada1870bdf 100644
--- a/arch/i386/kernel/sysenter.c
+++ b/arch/i386/kernel/sysenter.c
@@ -21,11 +21,16 @@
 
 extern asmlinkage void sysenter_entry(void);
 
-void enable_sep_cpu(void *info)
+void enable_sep_cpu(void)
 {
 	int cpu = get_cpu();
 	struct tss_struct *tss = &per_cpu(init_tss, cpu);
 
+	if (!boot_cpu_has(X86_FEATURE_SEP)) {
+		put_cpu();
+		return;
+	}
+
 	tss->ss1 = __KERNEL_CS;
 	tss->esp1 = sizeof(struct tss_struct) + (unsigned long) tss;
 	wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
@@ -41,7 +46,7 @@ void enable_sep_cpu(void *info)
 extern const char vsyscall_int80_start, vsyscall_int80_end;
 extern const char vsyscall_sysenter_start, vsyscall_sysenter_end;
 
-static int __init sysenter_setup(void)
+int __init sysenter_setup(void)
 {
 	void *page = (void *)get_zeroed_page(GFP_ATOMIC);
 
@@ -58,8 +63,5 @@ static int __init sysenter_setup(void)
 	       &vsyscall_sysenter_start,
 	       &vsyscall_sysenter_end - &vsyscall_sysenter_start);
 
-	on_each_cpu(enable_sep_cpu, NULL, 1, 1);
 	return 0;
 }
-
-__initcall(sysenter_setup);
diff --git a/arch/i386/power/cpu.c b/arch/i386/power/cpu.c
index 6f521cf19a13..d099d01461f4 100644
--- a/arch/i386/power/cpu.c
+++ b/arch/i386/power/cpu.c
@@ -22,9 +22,11 @@
 #include <linux/device.h>
 #include <linux/suspend.h>
 #include <linux/acpi.h>
+
 #include <asm/uaccess.h>
 #include <asm/acpi.h>
 #include <asm/tlbflush.h>
+#include <asm/processor.h>
 
 static struct saved_context saved_context;
 
@@ -33,8 +35,6 @@ unsigned long saved_context_esp, saved_context_ebp;
 unsigned long saved_context_esi, saved_context_edi;
 unsigned long saved_context_eflags;
 
-extern void enable_sep_cpu(void *);
-
 void __save_processor_state(struct saved_context *ctxt)
 {
 	kernel_fpu_begin();
@@ -136,7 +136,7 @@ void __restore_processor_state(struct saved_context *ctxt)
 	 * sysenter MSRs
 	 */
 	if (boot_cpu_has(X86_FEATURE_SEP))
-		enable_sep_cpu(NULL);
+		enable_sep_cpu();
 
 	fix_processor_context();
 	do_fpu_end();
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index c76c50e96225..6f0f93d0d417 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -691,5 +691,7 @@ extern void select_idle_routine(const struct cpuinfo_x86 *c);
 #define cache_line_size() (boot_cpu_data.x86_cache_alignment)
 
 extern unsigned long boot_option_idle_override;
+extern void enable_sep_cpu(void);
+extern int sysenter_setup(void);
 
 #endif /* __ASM_I386_PROCESSOR_H */
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
index 507f2fd39a6a..2451ead0ca5c 100644
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -42,6 +42,8 @@ extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
 extern void smp_invalidate_rcv(void);		/* Process an NMI */
 extern void (*mtrr_hook) (void);
 extern void zap_low_mappings (void);
+extern void lock_ipi_call_lock(void);
+extern void unlock_ipi_call_lock(void);
 
 #define MAX_APICID 256
 extern u8 x86_cpu_to_apicid[];