author		Ingo Molnar <mingo@elte.hu>	2011-02-14 07:28:29 -0500
committer	Ingo Molnar <mingo@elte.hu>	2011-02-14 07:28:31 -0500
commit		b366801c95bdbeda811ac9668a3943051a18c188 (patch)
tree		79f08da2ad33a3159e67df344a4c411c88da7db6 /arch/x86
parent		eff9073790e1286aa12bf1c65814d3e0132b12e1 (diff)
parent		100b33c8bd8a3235fd0b7948338d6cbb3db3c63d (diff)

Merge commit 'v2.6.38-rc4' into x86/numa

Merge reason: Merge latest fixes before applying new patch.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86')
 arch/x86/include/asm/mmu_context.h  |  5 +++--
 arch/x86/include/asm/smp.h          |  5 +----
 arch/x86/kernel/acpi/sleep.c        | 15 +++++++++++----
 arch/x86/kernel/cpu/mtrr/main.c     | 10 +++++++++-
 arch/x86/kernel/cpu/perf_event_p4.c | 12 ++++++++++--
 arch/x86/kernel/head_32.S           | 30 +++++++++++++-----------------
 arch/x86/kernel/smpboot.c           |  4 ++--
 arch/x86/mm/pageattr.c              |  8 --------
 8 files changed, 49 insertions(+), 40 deletions(-)
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 4a2d4e0c18d9..8b5393ec1080 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -36,8 +36,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	unsigned cpu = smp_processor_id();
 
 	if (likely(prev != next)) {
-		/* stop flush ipis for the previous mm */
-		cpumask_clear_cpu(cpu, mm_cpumask(prev));
 #ifdef CONFIG_SMP
 		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
 		percpu_write(cpu_tlbstate.active_mm, next);
@@ -47,6 +45,9 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		/* Re-load page tables */
 		load_cr3(next->pgd);
 
+		/* stop flush ipis for the previous mm */
+		cpumask_clear_cpu(cpu, mm_cpumask(prev));
+
 		/*
 		 * load the LDT, if the LDT is different:
 		 */
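The mmu_context.h hunks are the whole fix: clearing this CPU's bit in mm_cpumask(prev) moves from before to after load_cr3(). A condensed sketch of the resulting switch_mm() (simplified from the header; the ordering comment is editorial, not from the patch):

    static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                                 struct task_struct *tsk)
    {
            unsigned cpu = smp_processor_id();

            if (likely(prev != next)) {
    #ifdef CONFIG_SMP
                    percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
                    percpu_write(cpu_tlbstate.active_mm, next);
    #endif
                    cpumask_set_cpu(cpu, mm_cpumask(next));

                    /* Re-load page tables */
                    load_cr3(next->pgd);

                    /*
                     * Only now is it safe to leave prev's cpumask: until
                     * load_cr3() completes, this CPU can still hold stale
                     * TLB entries for prev, so it must keep receiving
                     * flush IPIs.  Clearing the bit first, as the old
                     * code did, let a concurrent unmap on another CPU
                     * skip this one and leave a bogus translation live.
                     */
                    cpumask_clear_cpu(cpu, mm_cpumask(prev));

                    /* ... LDT reload elided ... */
            }
    }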
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 75927822c5c8..b296ca6f40bb 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -43,10 +43,7 @@ DECLARE_EARLY_PER_CPU(int, x86_cpu_to_logical_apicid);
 #endif
 
 /* Static state in head.S used to set up a CPU */
-extern struct {
-	void *sp;
-	unsigned short ss;
-} stack_start;
+extern unsigned long stack_start; /* Initial stack pointer address */
 
 struct smp_ops {
 	void (*smp_prepare_boot_cpu)(void);
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 69fd72aa5594..68d1537b8c81 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -12,10 +12,8 @@
 #include <linux/cpumask.h>
 #include <asm/segment.h>
 #include <asm/desc.h>
-
-#ifdef CONFIG_X86_32
 #include <asm/pgtable.h>
-#endif
+#include <asm/cacheflush.h>
 
 #include "realmode/wakeup.h"
 #include "sleep.h"
@@ -100,7 +98,7 @@ int acpi_save_state_mem(void)
 #else /* CONFIG_64BIT */
 	header->trampoline_segment = setup_trampoline() >> 4;
 #ifdef CONFIG_SMP
-	stack_start.sp = temp_stack + sizeof(temp_stack);
+	stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
 	early_gdt_descr.address =
 			(unsigned long)get_cpu_gdt_table(smp_processor_id());
 	initial_gs = per_cpu_offset(smp_processor_id());
@@ -149,6 +147,15 @@ void __init acpi_reserve_wakeup_memory(void)
 	memblock_x86_reserve_range(mem, mem + WAKEUP_SIZE, "ACPI WAKEUP");
 }
 
+int __init acpi_configure_wakeup_memory(void)
+{
+	if (acpi_realmode)
+		set_memory_x(acpi_realmode, WAKEUP_SIZE >> PAGE_SHIFT);
+
+	return 0;
+}
+arch_initcall(acpi_configure_wakeup_memory);
+
 
 static int __init acpi_sleep_setup(char *str)
 {
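The new arch_initcall() exists because NX protections can leave the reserved low-memory wakeup region mapped non-executable, and resume jumps straight into it; that is also why <asm/cacheflush.h>, which declares set_memory_x(), is now included unconditionally. An annotated restatement of the added function (the comments are editorial, not from the patch):

    int __init acpi_configure_wakeup_memory(void)
    {
            if (acpi_realmode)      /* still 0 if the low-memory reservation failed */
                    /* set_memory_x() takes a virtual address plus a page
                     * count, hence the bytes-to-pages shift */
                    set_memory_x(acpi_realmode, WAKEUP_SIZE >> PAGE_SHIFT);

            return 0;
    }
    arch_initcall(acpi_configure_wakeup_memory);    /* runs once during boot */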
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 01c0f3ee6cc3..bebabec5b448 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -793,13 +793,21 @@ void set_mtrr_aps_delayed_init(void)
 }
 
 /*
- * MTRR initialization for all AP's
+ * Delayed MTRR initialization for all AP's
  */
 void mtrr_aps_init(void)
 {
 	if (!use_intel())
 		return;
 
+	/*
+	 * Check if someone has requested the delay of AP MTRR initialization,
+	 * by doing set_mtrr_aps_delayed_init(), prior to this point. If not,
+	 * then we are done.
+	 */
+	if (!mtrr_aps_delayed_init)
+		return;
+
 	set_mtrr(~0U, 0, 0, 0);
 	mtrr_aps_delayed_init = false;
 }
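The added guard makes mtrr_aps_init() a no-op unless a delayed init was actually requested, so the set_mtrr() rendezvous (which stops every online CPU) is skipped on paths that never called set_mtrr_aps_delayed_init(). A rough sketch of the intended pairing (call sites simplified; an assumption, not quoted from this diff):

    set_mtrr_aps_delayed_init();    /* before the APs come up: defer their MTRR setup */

    /* ... bring the application processors online ... */

    mtrr_aps_init();                /* one set_mtrr(~0U, 0, 0, 0) rendezvous replays
                                     * the saved MTRR state on all online CPUs */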
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index e56b9bfbabd1..f7a0993c1e7c 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -682,7 +682,7 @@ static int p4_validate_raw_event(struct perf_event *event)
 	 * if an event is shared accross the logical threads
 	 * the user needs special permissions to be able to use it
 	 */
-	if (p4_event_bind_map[v].shared) {
+	if (p4_ht_active() && p4_event_bind_map[v].shared) {
 		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
 			return -EACCES;
 	}
@@ -727,7 +727,8 @@ static int p4_hw_config(struct perf_event *event)
 		event->hw.config = p4_set_ht_bit(event->hw.config);
 
 	if (event->attr.type == PERF_TYPE_RAW) {
-
+		struct p4_event_bind *bind;
+		unsigned int esel;
 		/*
 		 * Clear bits we reserve to be managed by kernel itself
 		 * and never allowed from a user space
@@ -743,6 +744,13 @@ static int p4_hw_config(struct perf_event *event)
 		 * bits since we keep additional info here (for cache events and etc)
 		 */
 		event->hw.config |= event->attr.config;
+		bind = p4_config_get_bind(event->attr.config);
+		if (!bind) {
+			rc = -EINVAL;
+			goto out;
+		}
+		esel = P4_OPCODE_ESEL(bind->opcode);
+		event->hw.config |= p4_config_pack_cccr(P4_CCCR_ESEL(esel));
 	}
 
 	rc = x86_setup_perfctr(event);
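For PERF_TYPE_RAW the user supplies the ESCR/CCCR bits directly, so nothing previously guaranteed that the CCCR's event-select field matched the event actually being programmed. The added lines close that hole (annotated restatement; the comments are editorial):

    /* Map the user's raw config back to the kernel's event template;
     * unknown encodings are rejected instead of being programmed. */
    bind = p4_config_get_bind(event->attr.config);
    if (!bind) {
            rc = -EINVAL;
            goto out;
    }
    /* Derive the CCCR event-select (ESEL) from the template's opcode and
     * OR it into the hardware config, overriding whatever the user set. */
    esel = P4_OPCODE_ESEL(bind->opcode);
    event->hw.config |= p4_config_pack_cccr(P4_CCCR_ESEL(esel));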
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index fc293dc8dc35..767d6c43de37 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -85,6 +85,8 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
  */
 __HEAD
 ENTRY(startup_32)
+	movl pa(stack_start),%ecx
+
 	/* test KEEP_SEGMENTS flag to see if the bootloader is asking
 	   us to not reload segments */
 	testb $(1<<6), BP_loadflags(%esi)
@@ -99,7 +101,9 @@ ENTRY(startup_32)
 	movl %eax,%es
 	movl %eax,%fs
 	movl %eax,%gs
+	movl %eax,%ss
 2:
+	leal -__PAGE_OFFSET(%ecx),%esp
 
 /*
  * Clear BSS first so that there are no surprises...
@@ -145,8 +149,6 @@ ENTRY(startup_32)
  * _brk_end is set up to point to the first "safe" location.
  * Mappings are created both at virtual address 0 (identity mapping)
  * and PAGE_OFFSET for up to _end.
- *
- * Note that the stack is not yet set up!
  */
 #ifdef CONFIG_X86_PAE
 
@@ -282,6 +284,9 @@ ENTRY(startup_32_smp)
 	movl %eax,%es
 	movl %eax,%fs
 	movl %eax,%gs
+	movl pa(stack_start),%ecx
+	movl %eax,%ss
+	leal -__PAGE_OFFSET(%ecx),%esp
 #endif /* CONFIG_SMP */
 default_entry:
 
@@ -347,8 +352,8 @@ default_entry:
 	movl %eax,%cr0		/* ..and set paging (PG) bit */
 	ljmp $__BOOT_CS,$1f	/* Clear prefetch and normalize %eip */
 1:
-	/* Set up the stack pointer */
-	lss stack_start,%esp
+	/* Shift the stack pointer to a virtual address */
+	addl $__PAGE_OFFSET, %esp
 
 /*
  * Initialize eflags. Some BIOS's leave bits like NT set. This would
@@ -360,9 +365,7 @@ default_entry:
 
 #ifdef CONFIG_SMP
 	cmpb $0, ready
-	jz 1f			/* Initial CPU cleans BSS */
-	jmp checkCPUtype
-1:
+	jnz checkCPUtype
 #endif /* CONFIG_SMP */
 
 /*
@@ -470,14 +473,7 @@ is386:	movl $2,%ecx		# set MP
 
 	cld			# gcc2 wants the direction flag cleared at all times
 	pushl $0		# fake return address for unwinder
-#ifdef CONFIG_SMP
-	movb ready, %cl
 	movb $1, ready
-	cmpb $0,%cl		# the first CPU calls start_kernel
-	je   1f
-	movl (stack_start), %esp
-1:
-#endif /* CONFIG_SMP */
 	jmp *(initial_code)
 
 /*
@@ -670,15 +666,15 @@ ENTRY(initial_page_table)
 #endif
 
 .data
+.balign 4
 ENTRY(stack_start)
 	.long init_thread_union+THREAD_SIZE
-	.long __BOOT_DS
-
-ready:	.byte 0
 
 early_recursion_flag:
 	.long 0
 
+ready:	.byte 0
+
 int_msg:
 	.asciz "Unknown interrupt or fault at: %p %p %p\n"
 
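The stack juggling above is one address-space conversion performed twice. Until paging is enabled the CPU runs at physical addresses, so the linked (virtual) value of stack_start must be shifted down by __PAGE_OFFSET; once paging is on, %esp is shifted back up. In illustrative C (an assumption: the common 3G/1G split; the constant actually comes from CONFIG_PAGE_OFFSET):

    /* Illustrative only, not part of the patch. */
    #define __PAGE_OFFSET 0xC0000000UL    /* typical 32-bit lowmem offset */

    unsigned long virt = (unsigned long)&init_thread_union + THREAD_SIZE; /* stack_start */
    unsigned long phys = virt - __PAGE_OFFSET;  /* leal -__PAGE_OFFSET(%ecx),%esp */

    /* ... set PG in %cr0, ljmp to normalize %eip ... */

    unsigned long virt_esp = phys + __PAGE_OFFSET;  /* addl $__PAGE_OFFSET, %esp */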
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 522b2173888a..8886ef36d5dd 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -577,7 +577,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
 	 * target processor state.
 	 */
 	startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
-			 (unsigned long)stack_start.sp);
+			 stack_start);
 
 	/*
 	 * Run STARTUP IPI loop.
@@ -724,7 +724,7 @@ do_rest:
 #endif
 	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
 	initial_code = (unsigned long)start_secondary;
-	stack_start.sp = (void *) c_idle.idle->thread.sp;
+	stack_start  = c_idle.idle->thread.sp;
 
 	/* start_ip had better be page-aligned! */
 	start_ip = setup_trampoline();
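With stack_start reduced to a single unsigned long (see the asm/smp.h hunk above), CPU bringup no longer hands over an {sp, ss} pair: %ss is just the flat data segment the startup code loads anyway, and each entry point derives the physical stack address itself. The handoff, simplified (the asm lines are quoted from the head_32.S hunks above):

    /* The booting CPU publishes the new AP's stack top ... */
    stack_start = c_idle.idle->thread.sp;   /* virtual top of the idle task's stack */

    /* ... and head_32.S consumes it before paging is on:
     *      movl pa(stack_start),%ecx        # read via the physical alias
     *      leal -__PAGE_OFFSET(%ecx),%esp   # physical stack address
     */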
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 8b830ca14ac4..d343b3c81f3c 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -256,7 +256,6 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
 				   unsigned long pfn)
 {
 	pgprot_t forbidden = __pgprot(0);
-	pgprot_t required = __pgprot(0);
 
 	/*
 	 * The BIOS area between 640k and 1Mb needs to be executable for
@@ -282,12 +281,6 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
 	if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
 		   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
 		pgprot_val(forbidden) |= _PAGE_RW;
-	/*
-	 * .data and .bss should always be writable.
-	 */
-	if (within(address, (unsigned long)_sdata, (unsigned long)_edata) ||
-	    within(address, (unsigned long)__bss_start, (unsigned long)__bss_stop))
-		pgprot_val(required) |= _PAGE_RW;
 
 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
 	/*
@@ -327,7 +320,6 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
 #endif
 
 	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
-	prot = __pgprot(pgprot_val(prot) | pgprot_val(required));
 
 	return prot;
 }
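With the required mask gone, static_protections() is purely subtractive again: it can strip permission bits from a caller's request but never forces bits back on (the dropped code forcibly re-enabled _PAGE_RW on .data/.bss). Its remaining shape, as an editorial sketch with the individual range checks elided:

    static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
                                              unsigned long pfn)
    {
            pgprot_t forbidden = __pgprot(0);

            /* ... accumulate bits that must NOT be set for this range,
             * e.g. _PAGE_NX for the BIOS area, _PAGE_RW for .rodata ... */

            prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

            return prot;
    }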