Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/acpi/sleep.c              | 15
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c     |  3
-rw-r--r--  arch/x86/kernel/cpu/mcheck/therm_throt.c  |  1
-rw-r--r--  arch/x86/kernel/cpu/mtrr/main.c           | 10
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p4.c       | 12
-rw-r--r--  arch/x86/kernel/dumpstack_64.c            |  2
-rw-r--r--  arch/x86/kernel/head_32.S                 | 30
-rw-r--r--  arch/x86/kernel/irq_32.c                  |  7
-rw-r--r--  arch/x86/kernel/process.c                 |  3
-rw-r--r--  arch/x86/kernel/smpboot.c                 |  7
10 files changed, 56 insertions, 34 deletions
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 69fd72aa5594..68d1537b8c81 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -12,10 +12,8 @@
 #include <linux/cpumask.h>
 #include <asm/segment.h>
 #include <asm/desc.h>
-
-#ifdef CONFIG_X86_32
 #include <asm/pgtable.h>
-#endif
+#include <asm/cacheflush.h>
 
 #include "realmode/wakeup.h"
 #include "sleep.h"
@@ -100,7 +98,7 @@ int acpi_save_state_mem(void)
 #else /* CONFIG_64BIT */
        header->trampoline_segment = setup_trampoline() >> 4;
 #ifdef CONFIG_SMP
-       stack_start.sp = temp_stack + sizeof(temp_stack);
+       stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
        early_gdt_descr.address =
                        (unsigned long)get_cpu_gdt_table(smp_processor_id());
        initial_gs = per_cpu_offset(smp_processor_id());
@@ -149,6 +147,15 @@ void __init acpi_reserve_wakeup_memory(void)
        memblock_x86_reserve_range(mem, mem + WAKEUP_SIZE, "ACPI WAKEUP");
 }
 
+int __init acpi_configure_wakeup_memory(void)
+{
+       if (acpi_realmode)
+               set_memory_x(acpi_realmode, WAKEUP_SIZE >> PAGE_SHIFT);
+
+       return 0;
+}
+arch_initcall(acpi_configure_wakeup_memory);
+
 
 static int __init acpi_sleep_setup(char *str)
 {
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 7283e98deaae..ec2c19a7b8ef 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -45,6 +45,7 @@ static const struct _cache_table __cpuinitconst cache_table[] =
        { 0x0a, LVL_1_DATA, 8 },        /* 2 way set assoc, 32 byte line size */
        { 0x0c, LVL_1_DATA, 16 },       /* 4-way set assoc, 32 byte line size */
        { 0x0d, LVL_1_DATA, 16 },       /* 4-way set assoc, 64 byte line size */
+       { 0x0e, LVL_1_DATA, 24 },       /* 6-way set assoc, 64 byte line size */
        { 0x21, LVL_2, 256 },   /* 8-way set assoc, 64 byte line size */
        { 0x22, LVL_3, 512 },   /* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x23, LVL_3, MB(1) }, /* 8-way set assoc, sectored cache, 64 byte line size */
@@ -66,6 +67,7 @@ static const struct _cache_table __cpuinitconst cache_table[] =
        { 0x45, LVL_2, MB(2) }, /* 4-way set assoc, 32 byte line size */
        { 0x46, LVL_3, MB(4) }, /* 4-way set assoc, 64 byte line size */
        { 0x47, LVL_3, MB(8) }, /* 8-way set assoc, 64 byte line size */
+       { 0x48, LVL_2, MB(3) }, /* 12-way set assoc, 64 byte line size */
        { 0x49, LVL_3, MB(4) }, /* 16-way set assoc, 64 byte line size */
        { 0x4a, LVL_3, MB(6) }, /* 12-way set assoc, 64 byte line size */
        { 0x4b, LVL_3, MB(8) }, /* 16-way set assoc, 64 byte line size */
@@ -87,6 +89,7 @@ static const struct _cache_table __cpuinitconst cache_table[] =
        { 0x7c, LVL_2, MB(1) }, /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x7d, LVL_2, MB(2) }, /* 8-way set assoc, 64 byte line size */
        { 0x7f, LVL_2, 512 },   /* 2-way set assoc, 64 byte line size */
+       { 0x80, LVL_2, 512 },   /* 8-way set assoc, 64 byte line size */
        { 0x82, LVL_2, 256 },   /* 8-way set assoc, 32 byte line size */
        { 0x83, LVL_2, 512 },   /* 8-way set assoc, 32 byte line size */
        { 0x84, LVL_2, MB(1) }, /* 8-way set assoc, 32 byte line size */
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index e12246ff5aa6..6f8c5e9da97f 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -59,6 +59,7 @@ struct thermal_state {
 
 /* Callback to handle core threshold interrupts */
 int (*platform_thermal_notify)(__u64 msr_val);
+EXPORT_SYMBOL(platform_thermal_notify);
 
 static DEFINE_PER_CPU(struct thermal_state, thermal_state);
 
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 01c0f3ee6cc3..bebabec5b448 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -793,13 +793,21 @@ void set_mtrr_aps_delayed_init(void)
 }
 
 /*
- * MTRR initialization for all AP's
+ * Delayed MTRR initialization for all AP's
  */
 void mtrr_aps_init(void)
 {
        if (!use_intel())
                return;
 
+       /*
+        * Check if someone has requested the delay of AP MTRR initialization,
+        * by doing set_mtrr_aps_delayed_init(), prior to this point. If not,
+        * then we are done.
+        */
+       if (!mtrr_aps_delayed_init)
+               return;
+
        set_mtrr(~0U, 0, 0, 0);
        mtrr_aps_delayed_init = false;
 }
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index e56b9bfbabd1..f7a0993c1e7c 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -682,7 +682,7 @@ static int p4_validate_raw_event(struct perf_event *event)
         * if an event is shared accross the logical threads
         * the user needs special permissions to be able to use it
         */
-       if (p4_event_bind_map[v].shared) {
+       if (p4_ht_active() && p4_event_bind_map[v].shared) {
                if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
                        return -EACCES;
        }
@@ -727,7 +727,8 @@ static int p4_hw_config(struct perf_event *event)
                event->hw.config = p4_set_ht_bit(event->hw.config);
 
        if (event->attr.type == PERF_TYPE_RAW) {
-
+               struct p4_event_bind *bind;
+               unsigned int esel;
                /*
                 * Clear bits we reserve to be managed by kernel itself
                 * and never allowed from a user space
@@ -743,6 +744,13 @@ static int p4_hw_config(struct perf_event *event)
                 * bits since we keep additional info here (for cache events and etc)
                 */
                event->hw.config |= event->attr.config;
+               bind = p4_config_get_bind(event->attr.config);
+               if (!bind) {
+                       rc = -EINVAL;
+                       goto out;
+               }
+               esel = P4_OPCODE_ESEL(bind->opcode);
+               event->hw.config |= p4_config_pack_cccr(P4_CCCR_ESEL(esel));
        }
 
        rc = x86_setup_perfctr(event);
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 64101335de19..a6b6fcf7f0ae 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -149,13 +149,13 @@ void dump_trace(struct task_struct *task,
        unsigned used = 0;
        struct thread_info *tinfo;
        int graph = 0;
+       unsigned long dummy;
        unsigned long bp;
 
        if (!task)
                task = current;
 
        if (!stack) {
-               unsigned long dummy;
                stack = &dummy;
                if (task && task != current)
                        stack = (unsigned long *)task->thread.sp;
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index fc293dc8dc35..767d6c43de37 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -85,6 +85,8 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
  */
 __HEAD
 ENTRY(startup_32)
+       movl pa(stack_start),%ecx
+
 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
        us to not reload segments */
        testb $(1<<6), BP_loadflags(%esi)
@@ -99,7 +101,9 @@ ENTRY(startup_32)
        movl %eax,%es
        movl %eax,%fs
        movl %eax,%gs
+       movl %eax,%ss
 2:
+       leal -__PAGE_OFFSET(%ecx),%esp
 
 /*
  * Clear BSS first so that there are no surprises...
@@ -145,8 +149,6 @@ ENTRY(startup_32)
  * _brk_end is set up to point to the first "safe" location.
  * Mappings are created both at virtual address 0 (identity mapping)
  * and PAGE_OFFSET for up to _end.
- *
- * Note that the stack is not yet set up!
  */
 #ifdef CONFIG_X86_PAE
 
@@ -282,6 +284,9 @@ ENTRY(startup_32_smp)
        movl %eax,%es
        movl %eax,%fs
        movl %eax,%gs
+       movl pa(stack_start),%ecx
+       movl %eax,%ss
+       leal -__PAGE_OFFSET(%ecx),%esp
 #endif /* CONFIG_SMP */
 default_entry:
 
@@ -347,8 +352,8 @@ default_entry:
        movl %eax,%cr0          /* ..and set paging (PG) bit */
        ljmp $__BOOT_CS,$1f     /* Clear prefetch and normalize %eip */
 1:
-       /* Set up the stack pointer */
-       lss stack_start,%esp
+       /* Shift the stack pointer to a virtual address */
+       addl $__PAGE_OFFSET, %esp
 
 /*
  * Initialize eflags. Some BIOS's leave bits like NT set. This would
@@ -360,9 +365,7 @@ default_entry:
 
 #ifdef CONFIG_SMP
        cmpb $0, ready
-       jz 1f                   /* Initial CPU cleans BSS */
-       jmp checkCPUtype
-1:
+       jnz checkCPUtype
 #endif /* CONFIG_SMP */
 
 /*
@@ -470,14 +473,7 @@ is386: movl $2,%ecx             # set MP
 
        cld                     # gcc2 wants the direction flag cleared at all times
        pushl $0                # fake return address for unwinder
-#ifdef CONFIG_SMP
-       movb ready, %cl
        movb $1, ready
-       cmpb $0,%cl             # the first CPU calls start_kernel
-       je 1f
-       movl (stack_start), %esp
-1:
-#endif /* CONFIG_SMP */
        jmp *(initial_code)
 
 /*
@@ -670,15 +666,15 @@ ENTRY(initial_page_table)
 #endif
 
 .data
+.balign 4
 ENTRY(stack_start)
        .long init_thread_union+THREAD_SIZE
-       .long __BOOT_DS
-
-ready: .byte 0
 
 early_recursion_flag:
        .long 0
 
+ready: .byte 0
+
 int_msg:
        .asciz "Unknown interrupt or fault at: %p %p %p\n"
 
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 48ff6dcffa02..9974d21048fd 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -129,8 +129,7 @@ void __cpuinit irq_ctx_init(int cpu)
        irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
                                               THREAD_FLAGS,
                                               THREAD_ORDER));
-       irqctx->tinfo.task = NULL;
-       irqctx->tinfo.exec_domain = NULL;
+       memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
        irqctx->tinfo.cpu = cpu;
        irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
        irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
@@ -140,10 +139,8 @@ void __cpuinit irq_ctx_init(int cpu)
        irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
                                               THREAD_FLAGS,
                                               THREAD_ORDER));
-       irqctx->tinfo.task = NULL;
-       irqctx->tinfo.exec_domain = NULL;
+       memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
        irqctx->tinfo.cpu = cpu;
-       irqctx->tinfo.preempt_count = 0;
        irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
 
        per_cpu(softirq_ctx, cpu) = irqctx;
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index d8286ed54ffa..e764fc05d700 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -14,6 +14,7 @@
 #include <linux/utsname.h>
 #include <trace/events/power.h>
 #include <linux/hw_breakpoint.h>
+#include <asm/cpu.h>
 #include <asm/system.h>
 #include <asm/apic.h>
 #include <asm/syscalls.h>
@@ -505,7 +506,7 @@ static void poll_idle(void)
 #define MWAIT_ECX_EXTENDED_INFO         0x01
 #define MWAIT_EDX_C1                    0xf0
 
-static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
+int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
 {
        u32 eax, ebx, ecx, edx;
 
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 763df77343dd..03273b6c272c 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -638,7 +638,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
         * target processor state.
         */
        startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
-                        (unsigned long)stack_start.sp);
+                        stack_start);
 
        /*
         * Run STARTUP IPI loop.
@@ -785,7 +785,7 @@ do_rest:
 #endif
        early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
        initial_code = (unsigned long)start_secondary;
-       stack_start.sp = (void *) c_idle.idle->thread.sp;
+       stack_start = c_idle.idle->thread.sp;
 
        /* start_ip had better be page-aligned! */
        start_ip = setup_trampoline();
@@ -1402,8 +1402,9 @@ static inline void mwait_play_dead(void)
        unsigned int highest_subcstate = 0;
        int i;
        void *mwait_ptr;
+       struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info);
 
-       if (!cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_MWAIT))
+       if (!(cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)))
                return;
        if (!cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLSH))
                return;