Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/mmu_context.h    5
-rw-r--r--  arch/x86/include/asm/paravirt.h       5
-rw-r--r--  arch/x86/include/asm/percpu.h        24
-rw-r--r--  arch/x86/include/asm/smp.h            5
-rw-r--r--  arch/x86/include/asm/system_64.h     22
-rw-r--r--  arch/x86/kernel/acpi/sleep.c         15
-rw-r--r--  arch/x86/kernel/cpu/mtrr/main.c      10
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p4.c  12
-rw-r--r--  arch/x86/kernel/dumpstack_64.c        2
-rw-r--r--  arch/x86/kernel/head_32.S            30
-rw-r--r--  arch/x86/kernel/smpboot.c             4
-rw-r--r--  arch/x86/mm/pageattr.c                8
-rw-r--r--  arch/x86/xen/p2m.c                   18
-rw-r--r--  arch/x86/xen/setup.c                  8
14 files changed, 77 insertions(+), 91 deletions(-)
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 4a2d4e0c18d9..8b5393ec1080 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -36,8 +36,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
         unsigned cpu = smp_processor_id();
 
         if (likely(prev != next)) {
-                /* stop flush ipis for the previous mm */
-                cpumask_clear_cpu(cpu, mm_cpumask(prev));
 #ifdef CONFIG_SMP
                 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
                 percpu_write(cpu_tlbstate.active_mm, next);
@@ -47,6 +45,9 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                 /* Re-load page tables */
                 load_cr3(next->pgd);
 
+                /* stop flush ipis for the previous mm */
+                cpumask_clear_cpu(cpu, mm_cpumask(prev));
+
                 /*
                  * load the LDT, if the LDT is different:
                  */
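
The reorder above is subtle: once this CPU clears its bit in mm_cpumask(prev) it stops receiving TLB-flush IPIs for prev, so clearing the bit before load_cr3() leaves a window in which stale translations for prev can survive a concurrent PTE change. Keeping the bit set until the new CR3 is loaded closes that window. A minimal runnable sketch of the ordering argument, using stub functions whose names are stand-ins, not kernel calls:

    /* Ordering sketch with stubs only: stop listening for prev's flush
     * IPIs only after the old translations are unreachable. */
    #include <stdio.h>

    static void load_new_cr3(void)
    {
            puts("load_cr3(next->pgd): old TLB context replaced");
    }

    static void clear_prev_cpumask(void)
    {
            puts("cpumask_clear_cpu(): stop receiving flush IPIs for prev");
    }

    int main(void)
    {
            /* fixed order: while prev's translations may still be live in
             * the TLB, a flush IPI for prev can still reach this CPU */
            load_new_cr3();
            clear_prev_cpumask();
            return 0;
    }
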
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 2071a8b2b32f..ebbc4d8ab170 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -558,13 +558,12 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                               pmd_t *pmdp, pmd_t pmd)
 {
-#if PAGETABLE_LEVELS >= 3
         if (sizeof(pmdval_t) > sizeof(long))
                 /* 5 arg words */
                 pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd);
         else
-                PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp, pmd.pmd);
-#endif
+                PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp,
+                            native_pmd_val(pmd));
 }
 #endif
 
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 3788f4649db4..7e172955ee57 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -273,34 +273,34 @@ do {                                                                    \
         typeof(var) pxo_new__ = (nval);                                 \
         switch (sizeof(var)) {                                          \
         case 1:                                                         \
-                asm("\n1:mov "__percpu_arg(1)",%%al"                    \
-                    "\n\tcmpxchgb %2, "__percpu_arg(1)                  \
+                asm("\n\tmov "__percpu_arg(1)",%%al"                    \
+                    "\n1:\tcmpxchgb %2, "__percpu_arg(1)                \
                     "\n\tjnz 1b"                                        \
-                    : "=a" (pxo_ret__), "+m" (var)                      \
+                    : "=&a" (pxo_ret__), "+m" (var)                     \
                     : "q" (pxo_new__)                                   \
                     : "memory");                                        \
                 break;                                                  \
         case 2:                                                         \
-                asm("\n1:mov "__percpu_arg(1)",%%ax"                    \
-                    "\n\tcmpxchgw %2, "__percpu_arg(1)                  \
+                asm("\n\tmov "__percpu_arg(1)",%%ax"                    \
+                    "\n1:\tcmpxchgw %2, "__percpu_arg(1)                \
                     "\n\tjnz 1b"                                        \
-                    : "=a" (pxo_ret__), "+m" (var)                      \
+                    : "=&a" (pxo_ret__), "+m" (var)                     \
                     : "r" (pxo_new__)                                   \
                     : "memory");                                        \
                 break;                                                  \
         case 4:                                                         \
-                asm("\n1:mov "__percpu_arg(1)",%%eax"                   \
-                    "\n\tcmpxchgl %2, "__percpu_arg(1)                  \
+                asm("\n\tmov "__percpu_arg(1)",%%eax"                   \
+                    "\n1:\tcmpxchgl %2, "__percpu_arg(1)                \
                     "\n\tjnz 1b"                                        \
-                    : "=a" (pxo_ret__), "+m" (var)                      \
+                    : "=&a" (pxo_ret__), "+m" (var)                     \
                     : "r" (pxo_new__)                                   \
                     : "memory");                                        \
                 break;                                                  \
         case 8:                                                         \
-                asm("\n1:mov "__percpu_arg(1)",%%rax"                   \
-                    "\n\tcmpxchgq %2, "__percpu_arg(1)                  \
+                asm("\n\tmov "__percpu_arg(1)",%%rax"                   \
+                    "\n1:\tcmpxchgq %2, "__percpu_arg(1)                \
                     "\n\tjnz 1b"                                        \
-                    : "=a" (pxo_ret__), "+m" (var)                      \
+                    : "=&a" (pxo_ret__), "+m" (var)                     \
                     : "r" (pxo_new__)                                   \
                     : "memory");                                        \
                 break;                                                  \
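
Two things change in each case above: the backward-branch target `1:` moves from the initial load to the cmpxchg itself (a failed cmpxchg has already reloaded the accumulator with the current memory value, so only the compare-and-swap needs repeating), and `"=a"` becomes `"=&a"` so the compiler cannot place the new-value operand in the accumulator, which the leading mov overwrites. A user-space sketch of the corrected pattern for the 4-byte case, assuming GCC on x86; an illustration, not the kernel macro:

    #include <stdio.h>

    static unsigned int xchg_u32(unsigned int *ptr, unsigned int new)
    {
            unsigned int old;

            asm("\n\tmov %1, %0"          /* load the current value once   */
                "\n1:\tcmpxchgl %2, %1"   /* swap if unchanged; on failure */
                "\n\tjnz 1b"              /* %eax already holds the fresh  */
                                          /* value, so retry the cmpxchg   */
                : "=&a" (old), "+m" (*ptr)  /* '&': %eax written before %2 is read */
                : "r" (new)
                : "memory");
            return old;
    }

    int main(void)
    {
            unsigned int v = 42;

            printf("old=%u new=%u\n", xchg_u32(&v, 7), v);  /* old=42 new=7 */
            return 0;
    }
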
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 3597825a3112..bc6f84e11d32 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -47,10 +47,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
 
 /* Static state in head.S used to set up a CPU */
-extern struct {
-        void *sp;
-        unsigned short ss;
-} stack_start;
+extern unsigned long stack_start; /* Initial stack pointer address */
 
 struct smp_ops {
         void (*smp_prepare_boot_cpu)(void);
diff --git a/arch/x86/include/asm/system_64.h b/arch/x86/include/asm/system_64.h
deleted file mode 100644
index 1159e091ad09..000000000000
--- a/arch/x86/include/asm/system_64.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef _ASM_X86_SYSTEM_64_H
-#define _ASM_X86_SYSTEM_64_H
-
-#include <asm/segment.h>
-#include <asm/cmpxchg.h>
-
-
-static inline unsigned long read_cr8(void)
-{
-        unsigned long cr8;
-        asm volatile("movq %%cr8,%0" : "=r" (cr8));
-        return cr8;
-}
-
-static inline void write_cr8(unsigned long val)
-{
-        asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
-}
-
-#include <linux/irqflags.h>
-
-#endif /* _ASM_X86_SYSTEM_64_H */
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 69fd72aa5594..68d1537b8c81 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -12,10 +12,8 @@
 #include <linux/cpumask.h>
 #include <asm/segment.h>
 #include <asm/desc.h>
-
-#ifdef CONFIG_X86_32
 #include <asm/pgtable.h>
-#endif
+#include <asm/cacheflush.h>
 
 #include "realmode/wakeup.h"
 #include "sleep.h"
@@ -100,7 +98,7 @@ int acpi_save_state_mem(void)
 #else /* CONFIG_64BIT */
         header->trampoline_segment = setup_trampoline() >> 4;
 #ifdef CONFIG_SMP
-        stack_start.sp = temp_stack + sizeof(temp_stack);
+        stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
         early_gdt_descr.address =
                         (unsigned long)get_cpu_gdt_table(smp_processor_id());
         initial_gs = per_cpu_offset(smp_processor_id());
@@ -149,6 +147,15 @@ void __init acpi_reserve_wakeup_memory(void)
         memblock_x86_reserve_range(mem, mem + WAKEUP_SIZE, "ACPI WAKEUP");
 }
 
+int __init acpi_configure_wakeup_memory(void)
+{
+        if (acpi_realmode)
+                set_memory_x(acpi_realmode, WAKEUP_SIZE >> PAGE_SHIFT);
+
+        return 0;
+}
+arch_initcall(acpi_configure_wakeup_memory);
+
 
 static int __init acpi_sleep_setup(char *str)
 {
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 01c0f3ee6cc3..bebabec5b448 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -793,13 +793,21 @@ void set_mtrr_aps_delayed_init(void)
 }
 
 /*
- * MTRR initialization for all AP's
+ * Delayed MTRR initialization for all AP's
  */
 void mtrr_aps_init(void)
 {
         if (!use_intel())
                 return;
 
+        /*
+         * Check if someone has requested the delay of AP MTRR initialization,
+         * by doing set_mtrr_aps_delayed_init(), prior to this point. If not,
+         * then we are done.
+         */
+        if (!mtrr_aps_delayed_init)
+                return;
+
         set_mtrr(~0U, 0, 0, 0);
         mtrr_aps_delayed_init = false;
 }
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index e56b9bfbabd1..f7a0993c1e7c 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -682,7 +682,7 @@ static int p4_validate_raw_event(struct perf_event *event)
          * if an event is shared accross the logical threads
          * the user needs special permissions to be able to use it
          */
-        if (p4_event_bind_map[v].shared) {
+        if (p4_ht_active() && p4_event_bind_map[v].shared) {
                 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
                         return -EACCES;
         }
@@ -727,7 +727,8 @@ static int p4_hw_config(struct perf_event *event)
                 event->hw.config = p4_set_ht_bit(event->hw.config);
 
         if (event->attr.type == PERF_TYPE_RAW) {
-
+                struct p4_event_bind *bind;
+                unsigned int esel;
                 /*
                  * Clear bits we reserve to be managed by kernel itself
                  * and never allowed from a user space
@@ -743,6 +744,13 @@ static int p4_hw_config(struct perf_event *event)
                  * bits since we keep additional info here (for cache events and etc)
                  */
                 event->hw.config |= event->attr.config;
+                bind = p4_config_get_bind(event->attr.config);
+                if (!bind) {
+                        rc = -EINVAL;
+                        goto out;
+                }
+                esel = P4_OPCODE_ESEL(bind->opcode);
+                event->hw.config |= p4_config_pack_cccr(P4_CCCR_ESEL(esel));
         }
 
         rc = x86_setup_perfctr(event);
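
For raw events, the last hunk now rejects configs with no known binding and mirrors the bound opcode's event-select (ESEL) into the CCCR half of event->hw.config. The sketch below is a hedged user-space model of that extract-and-pack shape: the real masks and shifts live in the P4 perf headers, and the DEMO_* values here are invented stand-ins, not the kernel's layout:

    #include <stdio.h>
    #include <stdint.h>

    #define DEMO_ESEL_MASK  0xffu   /* hypothetical P4_OPCODE_ESEL() mask */
    #define DEMO_CCCR_SHIFT 13      /* hypothetical ESEL position in CCCR */

    static unsigned int demo_opcode_esel(unsigned int opcode)
    {
            return opcode & DEMO_ESEL_MASK;           /* extract event select */
    }

    static uint64_t demo_pack_cccr_esel(unsigned int esel)
    {
            return (uint64_t)esel << DEMO_CCCR_SHIFT; /* place it in the CCCR */
    }

    int main(void)
    {
            unsigned int opcode = 0x0512;             /* hypothetical bind->opcode */
            uint64_t config = 0;

            config |= demo_pack_cccr_esel(demo_opcode_esel(opcode));
            printf("esel=%#x config=%#llx\n", demo_opcode_esel(opcode),
                   (unsigned long long)config);
            return 0;
    }
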
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 64101335de19..a6b6fcf7f0ae 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -149,13 +149,13 @@ void dump_trace(struct task_struct *task,
         unsigned used = 0;
         struct thread_info *tinfo;
         int graph = 0;
+        unsigned long dummy;
         unsigned long bp;
 
         if (!task)
                 task = current;
 
         if (!stack) {
-                unsigned long dummy;
                 stack = &dummy;
                 if (task && task != current)
                         stack = (unsigned long *)task->thread.sp;
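
Hoisting `dummy` to function scope is not cosmetic: `stack` can still point at `dummy` after the `if (!stack)` block exits, and using a pointer to a variable whose enclosing block has ended is undefined behavior. A stand-alone illustration of the lifetime pattern being fixed:

    #include <stdio.h>

    int main(void)
    {
            unsigned long dummy = 0;  /* new placement: outlives 'stack' */
            unsigned long *stack;

            {
                    /* unsigned long dummy;   old placement: its lifetime
                     * would end at the closing brace, leaving 'stack'
                     * dangling below */
                    stack = &dummy;
            }

            *stack = 1;  /* well-defined only with the function-scope dummy */
            printf("%lu\n", *stack);
            return 0;
    }
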
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index fc293dc8dc35..767d6c43de37 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -85,6 +85,8 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
  */
 __HEAD
 ENTRY(startup_32)
+        movl pa(stack_start),%ecx
+
 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
         us to not reload segments */
         testb $(1<<6), BP_loadflags(%esi)
@@ -99,7 +101,9 @@ ENTRY(startup_32)
         movl %eax,%es
         movl %eax,%fs
         movl %eax,%gs
+        movl %eax,%ss
 2:
+        leal -__PAGE_OFFSET(%ecx),%esp
 
 /*
  * Clear BSS first so that there are no surprises...
@@ -145,8 +149,6 @@ ENTRY(startup_32)
  * _brk_end is set up to point to the first "safe" location.
  * Mappings are created both at virtual address 0 (identity mapping)
  * and PAGE_OFFSET for up to _end.
- *
- * Note that the stack is not yet set up!
  */
 #ifdef CONFIG_X86_PAE
 
@@ -282,6 +284,9 @@ ENTRY(startup_32_smp)
         movl %eax,%es
         movl %eax,%fs
         movl %eax,%gs
+        movl pa(stack_start),%ecx
+        movl %eax,%ss
+        leal -__PAGE_OFFSET(%ecx),%esp
 #endif /* CONFIG_SMP */
 default_entry:
 
@@ -347,8 +352,8 @@ default_entry:
         movl %eax,%cr0          /* ..and set paging (PG) bit */
         ljmp $__BOOT_CS,$1f     /* Clear prefetch and normalize %eip */
 1:
-        /* Set up the stack pointer */
-        lss stack_start,%esp
+        /* Shift the stack pointer to a virtual address */
+        addl $__PAGE_OFFSET, %esp
 
 /*
  * Initialize eflags.  Some BIOS's leave bits like NT set.  This would
@@ -360,9 +365,7 @@ default_entry:
 
 #ifdef CONFIG_SMP
         cmpb $0, ready
-        jz 1f                   /* Initial CPU cleans BSS */
-        jmp checkCPUtype
-1:
+        jnz checkCPUtype
 #endif /* CONFIG_SMP */
 
 /*
@@ -470,14 +473,7 @@ is386:  movl $2,%ecx            # set MP
 
         cld                     # gcc2 wants the direction flag cleared at all times
         pushl $0                # fake return address for unwinder
-#ifdef CONFIG_SMP
-        movb ready, %cl
         movb $1, ready
-        cmpb $0,%cl             # the first CPU calls start_kernel
-        je   1f
-        movl (stack_start), %esp
-1:
-#endif /* CONFIG_SMP */
         jmp *(initial_code)
 
 /*
@@ -670,15 +666,15 @@ ENTRY(initial_page_table)
 #endif
 
 .data
+.balign 4
 ENTRY(stack_start)
         .long init_thread_union+THREAD_SIZE
-        .long __BOOT_DS
-
-ready:  .byte 0
 
 early_recursion_flag:
         .long 0
 
+ready:  .byte 0
+
 int_msg:
         .asciz "Unknown interrupt or fault at: %p %p %p\n"
 
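
The net effect of the head_32.S changes is that every CPU, boot and secondary alike, loads its stack from `stack_start` before reaching any C code: first as a physical address while paging is still off, then shifted to its virtual alias once paging is on, which is why the old "first CPU grabs the stack later" special-casing around `ready` can go away. A user-space sketch of that address arithmetic, assuming the default 32-bit `__PAGE_OFFSET` of 0xC0000000 and an invented stack address:

    #include <stdio.h>

    #define __PAGE_OFFSET 0xC0000000UL      /* default CONFIG_PAGE_OFFSET */

    int main(void)
    {
            unsigned long stack_start = 0xC06A3000UL;  /* hypothetical vaddr */
            unsigned long esp;

            esp = stack_start - __PAGE_OFFSET;  /* leal -__PAGE_OFFSET(%ecx),%esp */
            printf("physical esp: %#lx\n", esp);

            esp += __PAGE_OFFSET;               /* addl $__PAGE_OFFSET, %esp */
            printf("virtual  esp: %#lx\n", esp);
            return 0;
    }
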
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index a7a3d1acc632..592214d99b43 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -620,7 +620,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
          * target processor state.
          */
         startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
-                         (unsigned long)stack_start.sp);
+                         stack_start);
 
         /*
          * Run STARTUP IPI loop.
@@ -767,7 +767,7 @@ do_rest:
 #endif
         early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
         initial_code = (unsigned long)start_secondary;
-        stack_start.sp = (void *) c_idle.idle->thread.sp;
+        stack_start = c_idle.idle->thread.sp;
 
         /* start_ip had better be page-aligned! */
         start_ip = setup_trampoline();
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 8b830ca14ac4..d343b3c81f3c 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -256,7 +256,6 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
                                    unsigned long pfn)
 {
         pgprot_t forbidden = __pgprot(0);
-        pgprot_t required = __pgprot(0);
 
         /*
          * The BIOS area between 640k and 1Mb needs to be executable for
@@ -282,12 +281,6 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
         if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
                    __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
                 pgprot_val(forbidden) |= _PAGE_RW;
-        /*
-         * .data and .bss should always be writable.
-         */
-        if (within(address, (unsigned long)_sdata, (unsigned long)_edata) ||
-            within(address, (unsigned long)__bss_start, (unsigned long)__bss_stop))
-                pgprot_val(required) |= _PAGE_RW;
 
 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
         /*
@@ -327,7 +320,6 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
 #endif
 
         prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
-        prot = __pgprot(pgprot_val(prot) | pgprot_val(required));
 
         return prot;
 }
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index ddc81a06edb9..fd12d7ce7ff9 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -241,21 +241,15 @@ void __init xen_build_dynamic_phys_to_machine(void)
                  * As long as the mfn_list has enough entries to completely
                  * fill a p2m page, pointing into the array is ok. But if
                  * not the entries beyond the last pfn will be undefined.
-                 * And guessing that the 'what-ever-there-is' does not take it
-                 * too kindly when changing it to invalid markers, a new page
-                 * is allocated, initialized and filled with the valid part.
                  */
                 if (unlikely(pfn + P2M_PER_PAGE > max_pfn)) {
                         unsigned long p2midx;
-                        unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
-                        p2m_init(p2m);
-
-                        for (p2midx = 0; pfn + p2midx < max_pfn; p2midx++) {
-                                p2m[p2midx] = mfn_list[pfn + p2midx];
-                        }
-                        p2m_top[topidx][mididx] = p2m;
-                } else
-                        p2m_top[topidx][mididx] = &mfn_list[pfn];
+
+                        p2midx = max_pfn % P2M_PER_PAGE;
+                        for ( ; p2midx < P2M_PER_PAGE; p2midx++)
+                                mfn_list[pfn + p2midx] = INVALID_P2M_ENTRY;
+                }
+                p2m_top[topidx][mididx] = &mfn_list[pfn];
         }
 
         m2p_override_init();
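
Instead of allocating a fresh page via extend_brk() and copying the valid head of the last partial p2m page, the new code stamps INVALID_P2M_ENTRY over the tail of `mfn_list` past `max_pfn` and points straight into the array. A small user-space model of that tail-fill, with a deliberately tiny, hypothetical page size:

    #include <stdio.h>

    #define P2M_PER_PAGE      8UL   /* hypothetical; really PAGE_SIZE / sizeof(unsigned long) */
    #define INVALID_P2M_ENTRY (~0UL)

    int main(void)
    {
            unsigned long mfn_list[16];
            unsigned long max_pfn = 11;   /* last page only partly populated */
            unsigned long pfn = 8;        /* first pfn of the last p2m page  */
            unsigned long i;

            for (i = 0; i < max_pfn; i++)
                    mfn_list[i] = 100 + i;        /* pretend-valid mfns */

            if (pfn + P2M_PER_PAGE > max_pfn) {
                    unsigned long p2midx = max_pfn % P2M_PER_PAGE;

                    for ( ; p2midx < P2M_PER_PAGE; p2midx++)
                            mfn_list[pfn + p2midx] = INVALID_P2M_ENTRY;
            }
            /* p2m_top[topidx][mididx] would now be simply &mfn_list[pfn] */
            for (i = pfn; i < pfn + P2M_PER_PAGE; i++)
                    printf("pfn %2lu -> %#lx\n", i, mfn_list[i]);
            return 0;
    }
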
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index b5a7f928234b..a8a66a50d446 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -179,8 +179,13 @@ char * __init xen_memory_setup(void)
         e820.nr_map = 0;
         xen_extra_mem_start = mem_end;
         for (i = 0; i < memmap.nr_entries; i++) {
-                unsigned long long end = map[i].addr + map[i].size;
+                unsigned long long end;
 
+                /* Guard against non-page aligned E820 entries. */
+                if (map[i].type == E820_RAM)
+                        map[i].size -= (map[i].size + map[i].addr) % PAGE_SIZE;
+
+                end = map[i].addr + map[i].size;
                 if (map[i].type == E820_RAM && end > mem_end) {
                         /* RAM off the end - may be partially included */
                         u64 delta = min(map[i].size, end - mem_end);
@@ -350,6 +355,7 @@ void __init xen_arch_setup(void)
         boot_cpu_data.hlt_works_ok = 1;
 #endif
         pm_idle = default_idle;
+        boot_option_idle_override = IDLE_HALT;
 
         fiddle_vdso();
 }
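
The E820 guard above shrinks a RAM entry so that `addr + size` lands on a page boundary, discarding any partial tail page rather than treating it as usable memory. A stand-alone check of that one-line clamp, with invented entry values:

    #include <stdio.h>

    #define PAGE_SIZE 4096ULL

    int main(void)
    {
            unsigned long long addr = 0x100000ULL;         /* hypothetical entry   */
            unsigned long long size = 0x7fe0000ULL + 123;  /* end not page aligned */

            size -= (size + addr) % PAGE_SIZE;  /* the map[i].size -= ... clamp */

            printf("end = %#llx, page aligned: %s\n", addr + size,
                   ((addr + size) % PAGE_SIZE) ? "no" : "yes");
            return 0;
    }
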