Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/apic.h          |  1
-rw-r--r--  arch/x86/include/asm/cpu.h           |  2
-rw-r--r--  arch/x86/include/asm/mmu_context.h   |  5
-rw-r--r--  arch/x86/include/asm/paravirt.h      |  5
-rw-r--r--  arch/x86/include/asm/percpu.h        | 24
-rw-r--r--  arch/x86/include/asm/smp.h           |  5
-rw-r--r--  arch/x86/include/asm/system_64.h     | 22
-rw-r--r--  arch/x86/kernel/acpi/sleep.c         | 15
-rw-r--r--  arch/x86/kernel/alternative.c        |  2
-rw-r--r--  arch/x86/kernel/apic/apic.c          |  9
-rw-r--r--  arch/x86/kernel/apic/io_apic.c       |  3
-rw-r--r--  arch/x86/kernel/cpu/mtrr/main.c      | 10
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p4.c  | 12
-rw-r--r--  arch/x86/kernel/dumpstack_64.c       |  2
-rw-r--r--  arch/x86/kernel/head_32.S            | 30
-rw-r--r--  arch/x86/kernel/irq.c                |  3
-rw-r--r--  arch/x86/kernel/process.c            | 24
-rw-r--r--  arch/x86/kernel/smpboot.c            |  8
-rw-r--r--  arch/x86/kvm/svm.c                   |  2
-rw-r--r--  arch/x86/mm/pageattr.c               |  8
-rw-r--r--  arch/x86/xen/p2m.c                   | 18
-rw-r--r--  arch/x86/xen/setup.c                 |  8
22 files changed, 112 insertions, 106 deletions
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 5e3969c36d7f..3c896946f4cc 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -233,6 +233,7 @@ extern void sync_Arb_IDs(void);
 extern void init_bsp_APIC(void);
 extern void setup_local_APIC(void);
 extern void end_local_APIC_setup(void);
+extern void bsp_end_local_APIC_setup(void);
 extern void init_apic_mappings(void);
 void register_lapic_address(unsigned long address);
 extern void setup_boot_APIC_clock(void);
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index 6e6e7558e702..4564c8e28a33 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -32,6 +32,6 @@ extern void arch_unregister_cpu(int);
 
 DECLARE_PER_CPU(int, cpu_state);
 
-int __cpuinit mwait_usable(const struct cpuinfo_x86 *);
+int mwait_usable(const struct cpuinfo_x86 *);
 
 #endif /* _ASM_X86_CPU_H */
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 4a2d4e0c18d9..8b5393ec1080 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -36,8 +36,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	unsigned cpu = smp_processor_id();
 
 	if (likely(prev != next)) {
-		/* stop flush ipis for the previous mm */
-		cpumask_clear_cpu(cpu, mm_cpumask(prev));
 #ifdef CONFIG_SMP
 		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
 		percpu_write(cpu_tlbstate.active_mm, next);
@@ -47,6 +45,9 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		/* Re-load page tables */
 		load_cr3(next->pgd);
 
+		/* stop flush ipis for the previous mm */
+		cpumask_clear_cpu(cpu, mm_cpumask(prev));
+
 		/*
 		 * load the LDT, if the LDT is different:
 		 */
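
Note: the two hunks above reorder switch_mm() so the CPU drops out of mm_cpumask(prev) only after CR3 has been switched. Clearing that bit is what tells other CPUs to stop sending TLB-flush IPIs for prev, so clearing it before load_cr3() left a window for stale translations. A sketch of the race the old order allowed (illustrative interleaving, not code from the patch):

	/* CPU A, old order:                  CPU B:
	 *   cpumask_clear_cpu(A, prev)
	 *                                     unmaps a page in prev, flushes
	 *                                     TLBs, skips A (bit already clear)
	 *   ...A can still use stale prev
	 *   TLB entries in this window...
	 *   load_cr3(next->pgd)
	 * With the clear moved after load_cr3(), A never runs on prev's page
	 * tables while being unreachable by flush IPIs. */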
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 2071a8b2b32f..ebbc4d8ab170 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -558,13 +558,12 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 			      pmd_t *pmdp, pmd_t pmd)
 {
-#if PAGETABLE_LEVELS >= 3
 	if (sizeof(pmdval_t) > sizeof(long))
 		/* 5 arg words */
 		pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd);
 	else
-		PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp, pmd.pmd);
-#endif
+		PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp,
+			    native_pmd_val(pmd));
 }
 #endif
 
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 3788f4649db4..7e172955ee57 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -273,34 +273,34 @@ do { \
 	typeof(var) pxo_new__ = (nval);				\
 	switch (sizeof(var)) {					\
 	case 1:							\
-		asm("\n1:mov "__percpu_arg(1)",%%al"		\
-		    "\n\tcmpxchgb %2, "__percpu_arg(1)		\
+		asm("\n\tmov "__percpu_arg(1)",%%al"		\
+		    "\n1:\tcmpxchgb %2, "__percpu_arg(1)	\
 		    "\n\tjnz 1b"				\
-			    : "=a" (pxo_ret__), "+m" (var)	\
+			    : "=&a" (pxo_ret__), "+m" (var)	\
 			    : "q" (pxo_new__)			\
 			    : "memory");			\
 		break;						\
 	case 2:							\
-		asm("\n1:mov "__percpu_arg(1)",%%ax"		\
-		    "\n\tcmpxchgw %2, "__percpu_arg(1)		\
+		asm("\n\tmov "__percpu_arg(1)",%%ax"		\
+		    "\n1:\tcmpxchgw %2, "__percpu_arg(1)	\
 		    "\n\tjnz 1b"				\
-			    : "=a" (pxo_ret__), "+m" (var)	\
+			    : "=&a" (pxo_ret__), "+m" (var)	\
 			    : "r" (pxo_new__)			\
 			    : "memory");			\
 		break;						\
 	case 4:							\
-		asm("\n1:mov "__percpu_arg(1)",%%eax"		\
-		    "\n\tcmpxchgl %2, "__percpu_arg(1)		\
+		asm("\n\tmov "__percpu_arg(1)",%%eax"		\
+		    "\n1:\tcmpxchgl %2, "__percpu_arg(1)	\
 		    "\n\tjnz 1b"				\
-			    : "=a" (pxo_ret__), "+m" (var)	\
+			    : "=&a" (pxo_ret__), "+m" (var)	\
 			    : "r" (pxo_new__)			\
 			    : "memory");			\
 		break;						\
 	case 8:							\
-		asm("\n1:mov "__percpu_arg(1)",%%rax"		\
-		    "\n\tcmpxchgq %2, "__percpu_arg(1)		\
+		asm("\n\tmov "__percpu_arg(1)",%%rax"		\
+		    "\n1:\tcmpxchgq %2, "__percpu_arg(1)	\
 		    "\n\tjnz 1b"				\
-			    : "=a" (pxo_ret__), "+m" (var)	\
+			    : "=&a" (pxo_ret__), "+m" (var)	\
 			    : "r" (pxo_new__)			\
 			    : "memory");			\
 		break;						\
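
Note: the label move relies on cmpxchg semantics: when the comparison fails, the instruction itself loads the current memory value into the accumulator, so the retry loop can branch straight back to the cmpxchg and skip the initial mov. The added '&' earlyclobber keeps the compiler from allocating pxo_new__ into the accumulator register it is about to overwrite. A plain-C sketch of the same retry loop, using a GCC builtin in place of the percpu asm (illustration only, not the kernel's implementation):

	static unsigned long xchg_sketch(unsigned long *var, unsigned long new)
	{
		unsigned long old = *var;	/* one initial load */
		unsigned long cur;

		/* __sync_val_compare_and_swap() returns the value found in
		 * memory, just as cmpxchg leaves it in the accumulator on
		 * failure, so a failed attempt needs no separate reload. */
		while ((cur = __sync_val_compare_and_swap(var, old, new)) != old)
			old = cur;
		return old;			/* xchg returns the old value */
	}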
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 4c2f63c7fc1b..1f4695136776 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
 
 /* Static state in head.S used to set up a CPU */
-extern struct {
-	void *sp;
-	unsigned short ss;
-} stack_start;
+extern unsigned long stack_start; /* Initial stack pointer address */
 
 struct smp_ops {
 	void (*smp_prepare_boot_cpu)(void);
diff --git a/arch/x86/include/asm/system_64.h b/arch/x86/include/asm/system_64.h
deleted file mode 100644
index 1159e091ad09..000000000000
--- a/arch/x86/include/asm/system_64.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef _ASM_X86_SYSTEM_64_H
-#define _ASM_X86_SYSTEM_64_H
-
-#include <asm/segment.h>
-#include <asm/cmpxchg.h>
-
-
-static inline unsigned long read_cr8(void)
-{
-	unsigned long cr8;
-	asm volatile("movq %%cr8,%0" : "=r" (cr8));
-	return cr8;
-}
-
-static inline void write_cr8(unsigned long val)
-{
-	asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
-}
-
-#include <linux/irqflags.h>
-
-#endif /* _ASM_X86_SYSTEM_64_H */
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 69fd72aa5594..68d1537b8c81 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -12,10 +12,8 @@
 #include <linux/cpumask.h>
 #include <asm/segment.h>
 #include <asm/desc.h>
-
-#ifdef CONFIG_X86_32
 #include <asm/pgtable.h>
-#endif
+#include <asm/cacheflush.h>
 
 #include "realmode/wakeup.h"
 #include "sleep.h"
@@ -100,7 +98,7 @@ int acpi_save_state_mem(void)
 #else /* CONFIG_64BIT */
 	header->trampoline_segment = setup_trampoline() >> 4;
 #ifdef CONFIG_SMP
-	stack_start.sp = temp_stack + sizeof(temp_stack);
+	stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
 	early_gdt_descr.address =
 			(unsigned long)get_cpu_gdt_table(smp_processor_id());
 	initial_gs = per_cpu_offset(smp_processor_id());
@@ -149,6 +147,15 @@ void __init acpi_reserve_wakeup_memory(void)
 	memblock_x86_reserve_range(mem, mem + WAKEUP_SIZE, "ACPI WAKEUP");
 }
 
+int __init acpi_configure_wakeup_memory(void)
+{
+	if (acpi_realmode)
+		set_memory_x(acpi_realmode, WAKEUP_SIZE >> PAGE_SHIFT);
+
+	return 0;
+}
+arch_initcall(acpi_configure_wakeup_memory);
+
 
 static int __init acpi_sleep_setup(char *str)
 {
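
Note: a hedged guess at the motivation: once NX protections are applied to the direct mapping, the realmode wakeup trampoline needs its execute permission restored explicitly, and an arch_initcall runs late enough for those protections to already be in place. set_memory_x() counts in pages, hence the conversion:

	/* bytes -> pages; PAGE_SHIFT is 12 on x86, e.g. 8192 bytes -> 2 pages */
	set_memory_x(acpi_realmode, WAKEUP_SIZE >> PAGE_SHIFT);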
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 123608531c8f..7038b95d363f 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -671,7 +671,7 @@ void __kprobes text_poke_smp_batch(struct text_poke_param *params, int n)
 
 	atomic_set(&stop_machine_first, 1);
 	wrote_text = 0;
-	stop_machine(stop_machine_text_poke, (void *)&tpp, NULL);
+	__stop_machine(stop_machine_text_poke, (void *)&tpp, NULL);
 }
 
 #if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 06c196d7e59c..76b96d74978a 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1381,12 +1381,17 @@ void __cpuinit end_local_APIC_setup(void)
 #endif
 
 	apic_pm_activate();
+}
+
+void __init bsp_end_local_APIC_setup(void)
+{
+	end_local_APIC_setup();
 
 	/*
 	 * Now that local APIC setup is completed for BP, configure the fault
 	 * handling for interrupt remapping.
 	 */
-	if (!smp_processor_id() && intr_remapping_enabled)
+	if (intr_remapping_enabled)
 		enable_drhd_fault_handling();
 
 }
@@ -1756,7 +1761,7 @@ int __init APIC_init_uniprocessor(void)
 	enable_IO_APIC();
 #endif
 
-	end_local_APIC_setup();
+	bsp_end_local_APIC_setup();
 
 #ifdef CONFIG_X86_IO_APIC
 	if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 697dc34b7b87..ca9e2a3545a9 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -4002,6 +4002,9 @@ int mp_find_ioapic(u32 gsi)
 {
 	int i = 0;
 
+	if (nr_ioapics == 0)
+		return -1;
+
 	/* Find the IOAPIC that manages this GSI. */
 	for (i = 0; i < nr_ioapics; i++) {
 		if ((gsi >= mp_gsi_routing[i].gsi_base)
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 01c0f3ee6cc3..bebabec5b448 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -793,13 +793,21 @@ void set_mtrr_aps_delayed_init(void)
 }
 
 /*
- * MTRR initialization for all AP's
+ * Delayed MTRR initialization for all AP's
  */
 void mtrr_aps_init(void)
 {
 	if (!use_intel())
 		return;
 
+	/*
+	 * Check if someone has requested the delay of AP MTRR initialization,
+	 * by doing set_mtrr_aps_delayed_init(), prior to this point. If not,
+	 * then we are done.
+	 */
+	if (!mtrr_aps_delayed_init)
+		return;
+
 	set_mtrr(~0U, 0, 0, 0);
 	mtrr_aps_delayed_init = false;
 }
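
Note: with this check, mtrr_aps_init() is a no-op unless a delayed bring-up was explicitly requested, so the intended sequence around AP bring-up (e.g. resume from suspend) is roughly:

	set_mtrr_aps_delayed_init();	/* request deferral before APs boot  */
	/* ... APs come online without the per-CPU MTRR rendezvous ...      */
	mtrr_aps_init();		/* one set_mtrr(~0U, ...) rendezvous */
	/* without the prior request, mtrr_aps_init() now simply returns    */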
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index e56b9bfbabd1..f7a0993c1e7c 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -682,7 +682,7 @@ static int p4_validate_raw_event(struct perf_event *event)
 	 * if an event is shared accross the logical threads
 	 * the user needs special permissions to be able to use it
 	 */
-	if (p4_event_bind_map[v].shared) {
+	if (p4_ht_active() && p4_event_bind_map[v].shared) {
 		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
 			return -EACCES;
 	}
@@ -727,7 +727,8 @@ static int p4_hw_config(struct perf_event *event)
 	event->hw.config = p4_set_ht_bit(event->hw.config);
 
 	if (event->attr.type == PERF_TYPE_RAW) {
-
+		struct p4_event_bind *bind;
+		unsigned int esel;
 		/*
 		 * Clear bits we reserve to be managed by kernel itself
 		 * and never allowed from a user space
@@ -743,6 +744,13 @@ static int p4_hw_config(struct perf_event *event)
 		 * bits since we keep additional info here (for cache events and etc)
 		 */
 		event->hw.config |= event->attr.config;
+		bind = p4_config_get_bind(event->attr.config);
+		if (!bind) {
+			rc = -EINVAL;
+			goto out;
+		}
+		esel = P4_OPCODE_ESEL(bind->opcode);
+		event->hw.config |= p4_config_pack_cccr(P4_CCCR_ESEL(esel));
 	}
 
 	rc = x86_setup_perfctr(event);
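
Note: a short summary of the added RAW-event handling, using only names from the hunk above:

	/* bind = p4_config_get_bind(attr->config) maps the user's raw config
	 * to the kernel's own event template; NULL means "no such event",
	 * hence -EINVAL. P4_OPCODE_ESEL() then extracts the ESCR select bits
	 * from the trusted template's opcode, and p4_config_pack_cccr()
	 * folds them into the CCCR half of the 64-bit config, overriding
	 * whatever the user supplied for that field. */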
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 64101335de19..a6b6fcf7f0ae 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -149,13 +149,13 @@ void dump_trace(struct task_struct *task,
 	unsigned used = 0;
 	struct thread_info *tinfo;
 	int graph = 0;
+	unsigned long dummy;
 	unsigned long bp;
 
 	if (!task)
 		task = current;
 
 	if (!stack) {
-		unsigned long dummy;
 		stack = &dummy;
 		if (task && task != current)
 			stack = (unsigned long *)task->thread.sp;
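
Note: hoisting dummy to function scope fixes an object-lifetime bug: stack could otherwise be left pointing at a variable whose enclosing block had ended. The removed pattern in isolation (sketch):

	unsigned long *stack;
	{
		unsigned long dummy;
		stack = &dummy;		/* valid only inside this block... */
	}
	/* ...any later dereference of stack reads a dead object, which is
	 * undefined behavior even when it happens to "work" */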
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index fc293dc8dc35..767d6c43de37 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -85,6 +85,8 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
  */
 __HEAD
 ENTRY(startup_32)
+	movl pa(stack_start),%ecx
+
 	/* test KEEP_SEGMENTS flag to see if the bootloader is asking
 	   us to not reload segments */
 	testb $(1<<6), BP_loadflags(%esi)
@@ -99,7 +101,9 @@ ENTRY(startup_32)
 	movl %eax,%es
 	movl %eax,%fs
 	movl %eax,%gs
+	movl %eax,%ss
 2:
+	leal -__PAGE_OFFSET(%ecx),%esp
 
 /*
  * Clear BSS first so that there are no surprises...
@@ -145,8 +149,6 @@ ENTRY(startup_32)
 * _brk_end is set up to point to the first "safe" location.
 * Mappings are created both at virtual address 0 (identity mapping)
 * and PAGE_OFFSET for up to _end.
- *
- * Note that the stack is not yet set up!
  */
 #ifdef CONFIG_X86_PAE
 
@@ -282,6 +284,9 @@ ENTRY(startup_32_smp)
 	movl %eax,%es
 	movl %eax,%fs
 	movl %eax,%gs
+	movl pa(stack_start),%ecx
+	movl %eax,%ss
+	leal -__PAGE_OFFSET(%ecx),%esp
 #endif /* CONFIG_SMP */
 default_entry:
 
@@ -347,8 +352,8 @@ default_entry:
 	movl %eax,%cr0		/* ..and set paging (PG) bit */
 	ljmp $__BOOT_CS,$1f	/* Clear prefetch and normalize %eip */
 1:
-	/* Set up the stack pointer */
-	lss stack_start,%esp
+	/* Shift the stack pointer to a virtual address */
+	addl $__PAGE_OFFSET, %esp
 
 /*
  * Initialize eflags. Some BIOS's leave bits like NT set. This would
@@ -360,9 +365,7 @@ default_entry:
 
 #ifdef CONFIG_SMP
 	cmpb $0, ready
-	jz 1f				/* Initial CPU cleans BSS */
-	jmp checkCPUtype
-1:
+	jnz checkCPUtype
 #endif /* CONFIG_SMP */
 
 /*
@@ -470,14 +473,7 @@ is386: movl $2,%ecx # set MP
 
 	cld			# gcc2 wants the direction flag cleared at all times
 	pushl $0		# fake return address for unwinder
-#ifdef CONFIG_SMP
-	movb ready, %cl
 	movb $1, ready
-	cmpb $0,%cl		# the first CPU calls start_kernel
-	je   1f
-	movl (stack_start), %esp
-1:
-#endif /* CONFIG_SMP */
 	jmp *(initial_code)
 
 /*
@@ -670,15 +666,15 @@ ENTRY(initial_page_table)
 #endif
 
 .data
+.balign 4
 ENTRY(stack_start)
 	.long init_thread_union+THREAD_SIZE
-	.long __BOOT_DS
-
-ready:	.byte 0
 
 early_recursion_flag:
 	.long 0
 
+ready:	.byte 0
+
 int_msg:
 	.asciz "Unknown interrupt or fault at: %p %p %p\n"
 
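
Note: both entry paths now set up %esp before paging is enabled, while the CPU still runs at physical addresses. stack_start holds a virtual address, so it is first adjusted by -__PAGE_OFFSET and shifted back with addl once CR0.PG is set. head_32.S's pa() helper expresses the same mapping (the 0xC0000000 value assumes the default 32-bit 3G/1G split):

	/* physical alias of a kernel virtual address, as in pa(stack_start) */
	#define pa(x) ((x) - __PAGE_OFFSET)	/* __PAGE_OFFSET == 0xC0000000 */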
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 52945da52a94..387b6a0c9e81 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -367,7 +367,8 @@ void fixup_irqs(void)
 		if (irr  & (1 << (vector % 32))) {
 			irq = __this_cpu_read(vector_irq[vector]);
 
-			data = irq_get_irq_data(irq);
+			desc = irq_to_desc(irq);
+			data = &desc->irq_data;
 			raw_spin_lock(&desc->lock);
 			if (data->chip->irq_retrigger)
 				data->chip->irq_retrigger(data);
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index e764fc05d700..ff4554198981 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -92,21 +92,31 @@ void show_regs(struct pt_regs *regs)
 
 void show_regs_common(void)
 {
-	const char *board, *product;
+	const char *vendor, *product, *board;
 
-	board = dmi_get_system_info(DMI_BOARD_NAME);
-	if (!board)
-		board = "";
+	vendor = dmi_get_system_info(DMI_SYS_VENDOR);
+	if (!vendor)
+		vendor = "";
 	product = dmi_get_system_info(DMI_PRODUCT_NAME);
 	if (!product)
 		product = "";
 
+	/* Board Name is optional */
+	board = dmi_get_system_info(DMI_BOARD_NAME);
+
 	printk(KERN_CONT "\n");
-	printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s/%s\n",
+	printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
 		current->pid, current->comm, print_tainted(),
 		init_utsname()->release,
 		(int)strcspn(init_utsname()->version, " "),
-		init_utsname()->version, board, product);
+		init_utsname()->version);
+	printk(KERN_CONT " ");
+	printk(KERN_CONT "%s %s", vendor, product);
+	if (board) {
+		printk(KERN_CONT "/");
+		printk(KERN_CONT "%s", board);
+	}
+	printk(KERN_CONT "\n");
 }
 
 void flush_thread(void)
@@ -506,7 +516,7 @@ static void poll_idle(void)
 #define MWAIT_ECX_EXTENDED_INFO		0x01
 #define MWAIT_EDX_C1			0xf0
 
-int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
+int mwait_usable(const struct cpuinfo_x86 *c)
 {
 	u32 eax, ebx, ecx, edx;
 
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 0cbe8c0b35ed..08776a953487 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -638,7 +638,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
 	 * target processor state.
 	 */
 	startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
-			 (unsigned long)stack_start.sp);
+			 stack_start);
 
 	/*
 	 * Run STARTUP IPI loop.
@@ -785,7 +785,7 @@ do_rest:
 #endif
 	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
 	initial_code = (unsigned long)start_secondary;
-	stack_start.sp = (void *) c_idle.idle->thread.sp;
+	stack_start  = c_idle.idle->thread.sp;
 
 	/* start_ip had better be page-aligned! */
 	start_ip = setup_trampoline();
@@ -1060,7 +1060,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
 
 		connect_bsp_APIC();
 		setup_local_APIC();
-		end_local_APIC_setup();
+		bsp_end_local_APIC_setup();
 		return -1;
 	}
 
@@ -1137,7 +1137,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 	if (!skip_ioapic_setup && nr_ioapics)
 		enable_IO_APIC();
 
-	end_local_APIC_setup();
+	bsp_end_local_APIC_setup();
 
 	map_cpu_to_logical_apicid();
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 25bd1bc5aad2..54ce246a383e 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1150,8 +1150,8 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
 	kvm_load_ldt(svm->host.ldt);
 #ifdef CONFIG_X86_64
 	loadsegment(fs, svm->host.fs);
-	load_gs_index(svm->host.gs);
 	wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
+	load_gs_index(svm->host.gs);
 #else
 	loadsegment(gs, svm->host.gs);
 #endif
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 8b830ca14ac4..d343b3c81f3c 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -256,7 +256,6 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
 				   unsigned long pfn)
 {
 	pgprot_t forbidden = __pgprot(0);
-	pgprot_t required = __pgprot(0);
 
 	/*
 	 * The BIOS area between 640k and 1Mb needs to be executable for
@@ -282,12 +281,6 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
 	if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
 		   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
 		pgprot_val(forbidden) |= _PAGE_RW;
-	/*
-	 * .data and .bss should always be writable.
-	 */
-	if (within(address, (unsigned long)_sdata, (unsigned long)_edata) ||
-	    within(address, (unsigned long)__bss_start, (unsigned long)__bss_stop))
-		pgprot_val(required) |= _PAGE_RW;
 
 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
 	/*
@@ -327,7 +320,6 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
 #endif
 
 	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
-	prot = __pgprot(pgprot_val(prot) | pgprot_val(required));
 
 	return prot;
 }
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index ddc81a06edb9..fd12d7ce7ff9 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -241,21 +241,15 @@ void __init xen_build_dynamic_phys_to_machine(void)
 		 * As long as the mfn_list has enough entries to completely
 		 * fill a p2m page, pointing into the array is ok. But if
 		 * not the entries beyond the last pfn will be undefined.
-		 * And guessing that the 'what-ever-there-is' does not take it
-		 * too kindly when changing it to invalid markers, a new page
-		 * is allocated, initialized and filled with the valid part.
 		 */
 		if (unlikely(pfn + P2M_PER_PAGE > max_pfn)) {
 			unsigned long p2midx;
-			unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
-			p2m_init(p2m);
-
-			for (p2midx = 0; pfn + p2midx < max_pfn; p2midx++) {
-				p2m[p2midx] = mfn_list[pfn + p2midx];
-			}
-			p2m_top[topidx][mididx] = p2m;
-		} else
-			p2m_top[topidx][mididx] = &mfn_list[pfn];
+
+			p2midx = max_pfn % P2M_PER_PAGE;
+			for ( ; p2midx < P2M_PER_PAGE; p2midx++)
+				mfn_list[pfn + p2midx] = INVALID_P2M_ENTRY;
+		}
+		p2m_top[topidx][mididx] = &mfn_list[pfn];
 	}
 
 	m2p_override_init();
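
Note: instead of copying the valid tail of mfn_list into a freshly allocated page, the loop now overwrites the entries past max_pfn in place, so the p2m page can always point into mfn_list. A worked example with illustrative numbers (on 64-bit, P2M_PER_PAGE == PAGE_SIZE / sizeof(unsigned long) == 512):

	/* max_pfn == 1300: the final chunk starts at pfn 1024, its first
	 * unused slot is max_pfn % P2M_PER_PAGE == 1300 % 512 == 276, so
	 * entries 276..511 of that chunk become INVALID_P2M_ENTRY. */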
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index b5a7f928234b..a8a66a50d446 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -179,8 +179,13 @@ char * __init xen_memory_setup(void)
 	e820.nr_map = 0;
 	xen_extra_mem_start = mem_end;
 	for (i = 0; i < memmap.nr_entries; i++) {
-		unsigned long long end = map[i].addr + map[i].size;
+		unsigned long long end;
 
+		/* Guard against non-page aligned E820 entries. */
+		if (map[i].type == E820_RAM)
+			map[i].size -= (map[i].size + map[i].addr) % PAGE_SIZE;
+
+		end = map[i].addr + map[i].size;
 		if (map[i].type == E820_RAM && end > mem_end) {
 			/* RAM off the end - may be partially included */
 			u64 delta = min(map[i].size, end - mem_end);
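
Note: the guard trims a RAM entry so that it ends on a page boundary, since the code below works in whole pages. A worked example (x86 PAGE_SIZE == 4096):

	/* addr == 0x1000, size == 0x2800  =>  end == 0x3800, not page aligned
	 * (size + addr) % PAGE_SIZE == 0x800
	 * size -= 0x800  =>  size == 0x2000, end == 0x3000, page aligned */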
@@ -350,6 +355,7 @@ void __init xen_arch_setup(void)
 	boot_cpu_data.hlt_works_ok = 1;
 #endif
 	pm_idle = default_idle;
+	boot_option_idle_override = IDLE_HALT;
 
 	fiddle_vdso();
 }