author		Linus Torvalds <torvalds@linux-foundation.org>	2011-02-06 15:03:10 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-02-06 15:03:10 -0500
commit		07675f484bb9de0c4bd0722e174cb27043dbacef (patch)
tree		a82f9378da66021881c06a348eb3e767ded4e04f /arch
parent		585a7c666e67b7a6757bd12b734e22f4f76998a2 (diff)
parent		11d4c3f9b671720e80353dd7e433ff2bf65e9500 (diff)
Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86-32: Make sure the stack is set up before we use it
  x86, mtrr: Avoid MTRR reprogramming on BP during boot on UP platforms
  x86, nx: Don't force pages RW when setting NX bits
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/include/asm/smp.h	5
-rw-r--r--	arch/x86/kernel/acpi/sleep.c	2
-rw-r--r--	arch/x86/kernel/cpu/mtrr/main.c	10
-rw-r--r--	arch/x86/kernel/head_32.S	30
-rw-r--r--	arch/x86/kernel/smpboot.c	4
-rw-r--r--	arch/x86/mm/pageattr.c	8
6 files changed, 26 insertions, 33 deletions
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 4c2f63c7fc1b..1f4695136776 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
 
 /* Static state in head.S used to set up a CPU */
-extern struct {
-	void *sp;
-	unsigned short ss;
-} stack_start;
+extern unsigned long stack_start; /* Initial stack pointer address */
 
 struct smp_ops {
 	void (*smp_prepare_boot_cpu)(void);
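The hunk above is the heart of the x86-32 stack fix: stack_start shrinks from a {sp, ss} pair, which only the lss instruction could consume in one go, to a plain unsigned long holding the initial stack pointer. A rough C sketch of the before/after shape (comments and the _old suffix are mine, not the patch's):

/* Before this series: head_32.S exported a pointer/segment pair so that
 * "lss stack_start,%esp" could load %ss and %esp together. */
extern struct {
	void *sp;
	unsigned short ss;
} stack_start_old;		/* illustrative name; the real symbol was stack_start */

/* After: %ss is loaded alongside the other data segment registers early
 * in head_32.S, so C code only needs the stack pointer value itself. */
extern unsigned long stack_start;

The sleep.c and smpboot.c hunks further down are mechanical fallout of this type change.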
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 69fd72aa5594..4d9ebbab2230 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -100,7 +100,7 @@ int acpi_save_state_mem(void)
 #else /* CONFIG_64BIT */
 	header->trampoline_segment = setup_trampoline() >> 4;
 #ifdef CONFIG_SMP
-	stack_start.sp = temp_stack + sizeof(temp_stack);
+	stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
 	early_gdt_descr.address =
 			(unsigned long)get_cpu_gdt_table(smp_processor_id());
 	initial_gs = per_cpu_offset(smp_processor_id());
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 01c0f3ee6cc3..bebabec5b448 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -793,13 +793,21 @@ void set_mtrr_aps_delayed_init(void)
 }
 
 /*
- * MTRR initialization for all AP's
+ * Delayed MTRR initialization for all AP's
  */
 void mtrr_aps_init(void)
 {
 	if (!use_intel())
 		return;
 
+	/*
+	 * Check if someone has requested the delay of AP MTRR initialization,
+	 * by doing set_mtrr_aps_delayed_init(), prior to this point. If not,
+	 * then we are done.
+	 */
+	if (!mtrr_aps_delayed_init)
+		return;
+
 	set_mtrr(~0U, 0, 0, 0);
 	mtrr_aps_delayed_init = false;
 }
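This is the whole of the MTRR fix: mtrr_aps_init() now refuses to touch the MTRRs unless someone actually deferred AP initialization beforehand, which is what used to reprogram the boot processor's MTRRs on UP platforms where no APs exist. A condensed, illustrative view of the flag handshake (the bodies here are simplified; use_intel() and set_mtrr() live elsewhere in mtrr/main.c):

static bool mtrr_aps_delayed_init;

/* Set by the SMP bringup path when AP MTRR setup should be deferred. */
void set_mtrr_aps_delayed_init(void)
{
	mtrr_aps_delayed_init = true;
}

/* Later, once the APs are online, replay the MTRR state on them. */
void mtrr_aps_init(void)
{
	if (!use_intel())
		return;

	/* Nothing was deferred (e.g. a UP boot): leave the BP's MTRRs alone. */
	if (!mtrr_aps_delayed_init)
		return;

	set_mtrr(~0U, 0, 0, 0);
	mtrr_aps_delayed_init = false;
}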
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index fc293dc8dc35..767d6c43de37 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -85,6 +85,8 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
  */
 __HEAD
 ENTRY(startup_32)
+	movl pa(stack_start),%ecx
+
 	/* test KEEP_SEGMENTS flag to see if the bootloader is asking
 		us to not reload segments */
 	testb $(1<<6), BP_loadflags(%esi)
@@ -99,7 +101,9 @@ ENTRY(startup_32)
 	movl %eax,%es
 	movl %eax,%fs
 	movl %eax,%gs
+	movl %eax,%ss
 2:
+	leal -__PAGE_OFFSET(%ecx),%esp
 
 /*
  * Clear BSS first so that there are no surprises...
@@ -145,8 +149,6 @@ ENTRY(startup_32)
  * _brk_end is set up to point to the first "safe" location.
  * Mappings are created both at virtual address 0 (identity mapping)
  * and PAGE_OFFSET for up to _end.
- *
- * Note that the stack is not yet set up!
  */
 #ifdef CONFIG_X86_PAE
 
@@ -282,6 +284,9 @@ ENTRY(startup_32_smp)
 	movl %eax,%es
 	movl %eax,%fs
 	movl %eax,%gs
+	movl pa(stack_start),%ecx
+	movl %eax,%ss
+	leal -__PAGE_OFFSET(%ecx),%esp
 #endif /* CONFIG_SMP */
 default_entry:
 
@@ -347,8 +352,8 @@ default_entry:
 	movl %eax,%cr0		/* ..and set paging (PG) bit */
 	ljmp $__BOOT_CS,$1f	/* Clear prefetch and normalize %eip */
 1:
-	/* Set up the stack pointer */
-	lss stack_start,%esp
+	/* Shift the stack pointer to a virtual address */
+	addl $__PAGE_OFFSET, %esp
 
 /*
  * Initialize eflags.  Some BIOS's leave bits like NT set.  This would
@@ -360,9 +365,7 @@ default_entry:
 
 #ifdef CONFIG_SMP
 	cmpb $0, ready
-	jz  1f		/* Initial CPU cleans BSS */
-	jmp checkCPUtype
-1:
+	jnz checkCPUtype
 #endif /* CONFIG_SMP */
 
 /*
@@ -470,14 +473,7 @@ is386: movl $2,%ecx # set MP
 
 	cld			# gcc2 wants the direction flag cleared at all times
 	pushl $0		# fake return address for unwinder
-#ifdef CONFIG_SMP
-	movb ready, %cl
 	movb $1, ready
-	cmpb $0,%cl		# the first CPU calls start_kernel
-	je   1f
-	movl (stack_start), %esp
-1:
-#endif /* CONFIG_SMP */
 	jmp *(initial_code)
 
 /*
@@ -670,15 +666,15 @@ ENTRY(initial_page_table)
 #endif
 
 .data
+.balign 4
 ENTRY(stack_start)
 	.long init_thread_union+THREAD_SIZE
-	.long __BOOT_DS
-
-ready:	.byte 0
 
 early_recursion_flag:
 	.long 0
 
+ready:	.byte 0
+
 int_msg:
 	.asciz "Unknown interrupt or fault at: %p %p %p\n"
 
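The head_32.S changes all serve one idea: give every CPU a usable stack before any C code or fault handler can run. stack_start holds the virtual address of the top of the boot stack (init_thread_union+THREAD_SIZE); while paging is still off the CPU works with physical addresses, so the entry code biases that value down by __PAGE_OFFSET, and once paging is enabled it simply adds the bias back instead of reloading the stack with lss. Because the stack is valid from the first instructions, the conditional stack reload in the removed #ifdef CONFIG_SMP block before jmp *(initial_code) is no longer needed. A small C sketch of the address arithmetic, with PAGE_OFFSET_32 as a stand-in for __PAGE_OFFSET (0xC0000000 under the default 3G/1G split, though it is configurable):

#include <stdint.h>

#define PAGE_OFFSET_32	0xC0000000UL	/* stand-in for __PAGE_OFFSET */

/* While paging is off: "leal -__PAGE_OFFSET(%ecx),%esp" in the diff. */
static inline uintptr_t boot_stack_phys(uintptr_t stack_start_virt)
{
	return stack_start_virt - PAGE_OFFSET_32;
}

/* Once paging is on: "addl $__PAGE_OFFSET, %esp" shifts it back. */
static inline uintptr_t boot_stack_virt(uintptr_t stack_phys)
{
	return stack_phys + PAGE_OFFSET_32;
}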
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 0cbe8c0b35ed..03273b6c272c 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -638,7 +638,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
 	 * target processor state.
 	 */
 	startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
-			 (unsigned long)stack_start.sp);
+			 stack_start);
 
 	/*
 	 * Run STARTUP IPI loop.
@@ -785,7 +785,7 @@ do_rest:
 #endif
 	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
 	initial_code = (unsigned long)start_secondary;
-	stack_start.sp = (void *) c_idle.idle->thread.sp;
+	stack_start  = c_idle.idle->thread.sp;
 
 	/* start_ip had better be page-aligned! */
 	start_ip = setup_trampoline();
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 8b830ca14ac4..d343b3c81f3c 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -256,7 +256,6 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
 				   unsigned long pfn)
 {
 	pgprot_t forbidden = __pgprot(0);
-	pgprot_t required = __pgprot(0);
 
 	/*
 	 * The BIOS area between 640k and 1Mb needs to be executable for
@@ -282,12 +281,6 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
 	if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
 		   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
 		pgprot_val(forbidden) |= _PAGE_RW;
-	/*
-	 * .data and .bss should always be writable.
-	 */
-	if (within(address, (unsigned long)_sdata, (unsigned long)_edata) ||
-	    within(address, (unsigned long)__bss_start, (unsigned long)__bss_stop))
-		pgprot_val(required) |= _PAGE_RW;
 
 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
 	/*
@@ -327,7 +320,6 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
 #endif
 
 	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
-	prot = __pgprot(pgprot_val(prot) | pgprot_val(required));
 
 	return prot;
 }
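The pageattr.c hunks are the NX fix: static_protections() used to carry a "required" mask that forced _PAGE_RW back on for .data and .bss, so a call meant only to flip the NX bit could end up making a mapping writable as a side effect. With the required mask gone the filter is purely subtractive. A minimal sketch of the resulting contract (the type and function name below are simplified stand-ins, not the kernel's headers):

typedef unsigned long pgprotval_t;	/* simplified stand-in for pgprot_t */

/* After the patch the filter may only clear bits from the requested
 * protections (e.g. stripping RW over rodata); it never OR-s bits back
 * in, so setting or clearing NX can no longer force a page writable. */
static pgprotval_t static_protections_sketch(pgprotval_t requested,
					     pgprotval_t forbidden)
{
	return requested & ~forbidden;
}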