Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                          |  20
-rw-r--r--  arch/x86/Kconfig.cpu                      |   2
-rw-r--r--  arch/x86/Kconfig.debug                    |   4
-rw-r--r--  arch/x86/include/asm/cacheflush.h         |  42
-rw-r--r--  arch/x86/include/asm/cpu.h                |   1
-rw-r--r--  arch/x86/include/asm/jump_label.h         |   2
-rw-r--r--  arch/x86/include/asm/mmu_context.h        |   5
-rw-r--r--  arch/x86/include/asm/numa_32.h            |   2
-rw-r--r--  arch/x86/include/asm/numa_64.h            |   1
-rw-r--r--  arch/x86/include/asm/paravirt.h           |   5
-rw-r--r--  arch/x86/include/asm/percpu.h             |  32
-rw-r--r--  arch/x86/include/asm/smp.h                |   5
-rw-r--r--  arch/x86/include/asm/system_64.h          |  22
-rw-r--r--  arch/x86/kernel/acpi/sleep.c              |  15
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c     |   3
-rw-r--r--  arch/x86/kernel/cpu/mcheck/therm_throt.c  |   1
-rw-r--r--  arch/x86/kernel/cpu/mtrr/main.c           |  10
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p4.c       |  12
-rw-r--r--  arch/x86/kernel/dumpstack_64.c            |   2
-rw-r--r--  arch/x86/kernel/head_32.S                 |  30
-rw-r--r--  arch/x86/kernel/process.c                 |   3
-rw-r--r--  arch/x86/kernel/smpboot.c                 |   7
-rw-r--r--  arch/x86/kernel/vmlinux.lds.S             |  11
-rw-r--r--  arch/x86/lguest/Kconfig                   |   1
-rw-r--r--  arch/x86/lguest/boot.c                    |   2
-rw-r--r--  arch/x86/mm/numa.c                        |  22
-rw-r--r--  arch/x86/mm/numa_64.c                     |  24
-rw-r--r--  arch/x86/mm/pageattr.c                    |   8
-rw-r--r--  arch/x86/mm/srat_32.c                     |   1
-rw-r--r--  arch/x86/xen/enlighten.c                  |   2
-rw-r--r--  arch/x86/xen/irq.c                        |   2
-rw-r--r--  arch/x86/xen/p2m.c                        |  12
-rw-r--r--  arch/x86/xen/setup.c                      |   8
33 files changed, 147 insertions, 172 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 3ed5ad92b029..d5ed94d30aad 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -627,11 +627,11 @@ config APB_TIMER
 	  as it is off-chip. APB timers are always running regardless of CPU
 	  C states, they are used as per CPU clockevent device when possible.
 
-# Mark as embedded because too many people got it wrong.
+# Mark as expert because too many people got it wrong.
 # The code disables itself when not needed.
 config DMI
 	default y
-	bool "Enable DMI scanning" if EMBEDDED
+	bool "Enable DMI scanning" if EXPERT
 	---help---
 	  Enabled scanning of DMI to identify machine quirks. Say Y
 	  here unless you have verified that your setup is not
@@ -639,7 +639,7 @@ config DMI
 	  BIOS code.
 
 config GART_IOMMU
-	bool "GART IOMMU support" if EMBEDDED
+	bool "GART IOMMU support" if EXPERT
 	default y
 	select SWIOTLB
 	depends on X86_64 && PCI && AMD_NB
@@ -889,7 +889,7 @@ config X86_THERMAL_VECTOR
 	depends on X86_MCE_INTEL
 
 config VM86
-	bool "Enable VM86 support" if EMBEDDED
+	bool "Enable VM86 support" if EXPERT
 	default y
 	depends on X86_32
 	---help---
@@ -1073,7 +1073,7 @@ endchoice
 
 choice
 	depends on EXPERIMENTAL
-	prompt "Memory split" if EMBEDDED
+	prompt "Memory split" if EXPERT
 	default VMSPLIT_3G
 	depends on X86_32
 	---help---
@@ -1135,7 +1135,7 @@ config ARCH_DMA_ADDR_T_64BIT
 	def_bool X86_64 || HIGHMEM64G
 
 config DIRECT_GBPAGES
-	bool "Enable 1GB pages for kernel pagetables" if EMBEDDED
+	bool "Enable 1GB pages for kernel pagetables" if EXPERT
 	default y
 	depends on X86_64
 	---help---
@@ -1369,7 +1369,7 @@ config MATH_EMULATION
 
 config MTRR
 	def_bool y
-	prompt "MTRR (Memory Type Range Register) support" if EMBEDDED
+	prompt "MTRR (Memory Type Range Register) support" if EXPERT
 	---help---
 	  On Intel P6 family processors (Pentium Pro, Pentium II and later)
 	  the Memory Type Range Registers (MTRRs) may be used to control
@@ -1435,7 +1435,7 @@ config MTRR_SANITIZER_SPARE_REG_NR_DEFAULT
 
 config X86_PAT
 	def_bool y
-	prompt "x86 PAT support" if EMBEDDED
+	prompt "x86 PAT support" if EXPERT
 	depends on MTRR
 	---help---
 	  Use PAT attributes to setup page level cache control.
@@ -1539,7 +1539,7 @@ config KEXEC_JUMP
 	  code in physical address mode via KEXEC
 
 config PHYSICAL_START
-	hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
+	hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
 	default "0x1000000"
 	---help---
 	  This gives the physical address where the kernel is loaded.
@@ -1934,7 +1934,7 @@ config PCI_MMCONFIG
 	depends on X86_64 && PCI && ACPI
 
 config PCI_CNB20LE_QUIRK
-	bool "Read CNB20LE Host Bridge Windows" if EMBEDDED
+	bool "Read CNB20LE Host Bridge Windows" if EXPERT
 	default n
 	depends on PCI && EXPERIMENTAL
 	help
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 15588a0ef466..283c5a6a03a6 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -424,7 +424,7 @@ config X86_DEBUGCTLMSR
 	depends on !(MK6 || MWINCHIPC6 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 || M486 || M386) && !UML
 
 menuconfig PROCESSOR_SELECT
-	bool "Supported processor vendors" if EMBEDDED
+	bool "Supported processor vendors" if EXPERT
 	---help---
 	  This lets you choose what x86 vendor support code your kernel
 	  will include.
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 45143bbcfe5e..615e18810f48 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -31,7 +31,7 @@ config X86_VERBOSE_BOOTUP
 	  see errors. Disable this if you want silent bootup.
 
 config EARLY_PRINTK
-	bool "Early printk" if EMBEDDED
+	bool "Early printk" if EXPERT
 	default y
 	---help---
 	  Write kernel log output directly into the VGA buffer or to a serial
@@ -138,7 +138,7 @@ config DEBUG_NX_TEST
 
 config DOUBLEFAULT
 	default y
-	bool "Enable doublefault exception handler" if EMBEDDED
+	bool "Enable doublefault exception handler" if EXPERT
 	depends on X86_32
 	---help---
 	  This option allows trapping of rare doublefault exceptions that
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 63e35ec9075c..62f084478f7e 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -1,48 +1,8 @@
 #ifndef _ASM_X86_CACHEFLUSH_H
 #define _ASM_X86_CACHEFLUSH_H
 
-/* Keep includes the same across arches. */
-#include <linux/mm.h>
-
 /* Caches aren't brain-dead on the intel. */
-static inline void flush_cache_all(void) { }
-static inline void flush_cache_mm(struct mm_struct *mm) { }
-static inline void flush_cache_dup_mm(struct mm_struct *mm) { }
-static inline void flush_cache_range(struct vm_area_struct *vma,
-				     unsigned long start, unsigned long end) { }
-static inline void flush_cache_page(struct vm_area_struct *vma,
-				    unsigned long vmaddr, unsigned long pfn) { }
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-static inline void flush_dcache_page(struct page *page) { }
-static inline void flush_dcache_mmap_lock(struct address_space *mapping) { }
-static inline void flush_dcache_mmap_unlock(struct address_space *mapping) { }
-static inline void flush_icache_range(unsigned long start,
-				      unsigned long end) { }
-static inline void flush_icache_page(struct vm_area_struct *vma,
-				     struct page *page) { }
-static inline void flush_icache_user_range(struct vm_area_struct *vma,
-					   struct page *page,
-					   unsigned long addr,
-					   unsigned long len) { }
-static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
-static inline void flush_cache_vunmap(unsigned long start,
-				      unsigned long end) { }
-
-static inline void copy_to_user_page(struct vm_area_struct *vma,
-				     struct page *page, unsigned long vaddr,
-				     void *dst, const void *src,
-				     unsigned long len)
-{
-	memcpy(dst, src, len);
-}
-
-static inline void copy_from_user_page(struct vm_area_struct *vma,
-				       struct page *page, unsigned long vaddr,
-				       void *dst, const void *src,
-				       unsigned long len)
-{
-	memcpy(dst, src, len);
-}
+#include <asm-generic/cacheflush.h>
 
 #ifdef CONFIG_X86_PAT
 /*
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index 4fab24de26b1..6e6e7558e702 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -32,5 +32,6 @@ extern void arch_unregister_cpu(int);
 
 DECLARE_PER_CPU(int, cpu_state);
 
+int __cpuinit mwait_usable(const struct cpuinfo_x86 *);
 
 #endif /* _ASM_X86_CPU_H */
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index f52d42e80585..574dbc22893a 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -14,7 +14,7 @@
 	do { \
 		asm goto("1:" \
 			JUMP_LABEL_INITIAL_NOP \
-			".pushsection __jump_table, \"a\" \n\t"\
+			".pushsection __jump_table, \"aw\" \n\t"\
 			_ASM_PTR "1b, %l[" #label "], %c0 \n\t" \
 			".popsection \n\t" \
 			: : "i" (key) : : label); \
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 4a2d4e0c18d9..8b5393ec1080 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -36,8 +36,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	unsigned cpu = smp_processor_id();
 
 	if (likely(prev != next)) {
-		/* stop flush ipis for the previous mm */
-		cpumask_clear_cpu(cpu, mm_cpumask(prev));
 #ifdef CONFIG_SMP
 		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
 		percpu_write(cpu_tlbstate.active_mm, next);
@@ -47,6 +45,9 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		/* Re-load page tables */
 		load_cr3(next->pgd);
 
+		/* stop flush ipis for the previous mm */
+		cpumask_clear_cpu(cpu, mm_cpumask(prev));
+
 		/*
 		 * load the LDT, if the LDT is different:
 		 */
diff --git a/arch/x86/include/asm/numa_32.h b/arch/x86/include/asm/numa_32.h
index a37229011b56..b0ef2b449a9d 100644
--- a/arch/x86/include/asm/numa_32.h
+++ b/arch/x86/include/asm/numa_32.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_NUMA_32_H
 #define _ASM_X86_NUMA_32_H
 
+extern int numa_off;
+
 extern int pxm_to_nid(int pxm);
 extern void numa_remove_cpu(int cpu);
 
diff --git a/arch/x86/include/asm/numa_64.h b/arch/x86/include/asm/numa_64.h
index 5ae87285a502..0493be39607c 100644
--- a/arch/x86/include/asm/numa_64.h
+++ b/arch/x86/include/asm/numa_64.h
@@ -40,6 +40,7 @@ extern void __cpuinit numa_remove_cpu(int cpu);
 #ifdef CONFIG_NUMA_EMU
 #define FAKE_NODE_MIN_SIZE	((u64)32 << 20)
 #define FAKE_NODE_MIN_HASH_MASK	(~(FAKE_NODE_MIN_SIZE - 1UL))
+void numa_emu_cmdline(char *);
 #endif /* CONFIG_NUMA_EMU */
 #else
 static inline void init_cpu_to_node(void)	{ }
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 2071a8b2b32f..ebbc4d8ab170 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -558,13 +558,12 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 			      pmd_t *pmdp, pmd_t pmd)
 {
-#if PAGETABLE_LEVELS >= 3
 	if (sizeof(pmdval_t) > sizeof(long))
 		/* 5 arg words */
 		pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd);
 	else
-		PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp, pmd.pmd);
-#endif
+		PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp,
+			    native_pmd_val(pmd));
 }
 #endif
 
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 8ee45167e817..7e172955ee57 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -273,34 +273,34 @@ do { \
 	typeof(var) pxo_new__ = (nval); \
 	switch (sizeof(var)) { \
 	case 1: \
-		asm("\n1:mov "__percpu_arg(1)",%%al" \
-		    "\n\tcmpxchgb %2, "__percpu_arg(1) \
+		asm("\n\tmov "__percpu_arg(1)",%%al" \
+		    "\n1:\tcmpxchgb %2, "__percpu_arg(1) \
 		    "\n\tjnz 1b" \
-		    : "=a" (pxo_ret__), "+m" (var) \
+		    : "=&a" (pxo_ret__), "+m" (var) \
 		    : "q" (pxo_new__) \
 		    : "memory"); \
 		break; \
 	case 2: \
-		asm("\n1:mov "__percpu_arg(1)",%%ax" \
-		    "\n\tcmpxchgw %2, "__percpu_arg(1) \
+		asm("\n\tmov "__percpu_arg(1)",%%ax" \
+		    "\n1:\tcmpxchgw %2, "__percpu_arg(1) \
 		    "\n\tjnz 1b" \
-		    : "=a" (pxo_ret__), "+m" (var) \
+		    : "=&a" (pxo_ret__), "+m" (var) \
 		    : "r" (pxo_new__) \
 		    : "memory"); \
 		break; \
 	case 4: \
-		asm("\n1:mov "__percpu_arg(1)",%%eax" \
-		    "\n\tcmpxchgl %2, "__percpu_arg(1) \
+		asm("\n\tmov "__percpu_arg(1)",%%eax" \
+		    "\n1:\tcmpxchgl %2, "__percpu_arg(1) \
 		    "\n\tjnz 1b" \
-		    : "=a" (pxo_ret__), "+m" (var) \
+		    : "=&a" (pxo_ret__), "+m" (var) \
 		    : "r" (pxo_new__) \
 		    : "memory"); \
 		break; \
 	case 8: \
-		asm("\n1:mov "__percpu_arg(1)",%%rax" \
-		    "\n\tcmpxchgq %2, "__percpu_arg(1) \
+		asm("\n\tmov "__percpu_arg(1)",%%rax" \
+		    "\n1:\tcmpxchgq %2, "__percpu_arg(1) \
 		    "\n\tjnz 1b" \
-		    : "=a" (pxo_ret__), "+m" (var) \
+		    : "=&a" (pxo_ret__), "+m" (var) \
 		    : "r" (pxo_new__) \
 		    : "memory"); \
 		break; \
@@ -414,8 +414,6 @@ do { \
 #define this_cpu_xchg_1(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define this_cpu_xchg_2(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define this_cpu_xchg_4(pcp, nval)	percpu_xchg_op(pcp, nval)
-#define this_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
-#define this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 
 #define irqsafe_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
 #define irqsafe_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
@@ -432,8 +430,6 @@ do { \
 #define irqsafe_cpu_xchg_1(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define irqsafe_cpu_xchg_2(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define irqsafe_cpu_xchg_4(pcp, nval)	percpu_xchg_op(pcp, nval)
-#define irqsafe_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
-#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 
 #ifndef CONFIG_M386
 #define __this_cpu_add_return_1(pcp, val)	percpu_add_return_op(pcp, val)
@@ -475,11 +471,15 @@ do { \
 #define this_cpu_or_8(pcp, val)		percpu_to_op("or", (pcp), val)
 #define this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define this_cpu_add_return_8(pcp, val)	percpu_add_return_op(pcp, val)
+#define this_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
+#define this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 
 #define irqsafe_cpu_add_8(pcp, val)	percpu_add_op((pcp), val)
 #define irqsafe_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
 #define irqsafe_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
 #define irqsafe_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define irqsafe_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
+#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 #endif
 
 /* This is not atomic against other CPUs -- CPU preemption needs to be off */
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 4c2f63c7fc1b..1f4695136776 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
 
 /* Static state in head.S used to set up a CPU */
-extern struct {
-	void *sp;
-	unsigned short ss;
-} stack_start;
+extern unsigned long stack_start; /* Initial stack pointer address */
 
 struct smp_ops {
 	void (*smp_prepare_boot_cpu)(void);
diff --git a/arch/x86/include/asm/system_64.h b/arch/x86/include/asm/system_64.h
deleted file mode 100644
index 1159e091ad09..000000000000
--- a/arch/x86/include/asm/system_64.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef _ASM_X86_SYSTEM_64_H
-#define _ASM_X86_SYSTEM_64_H
-
-#include <asm/segment.h>
-#include <asm/cmpxchg.h>
-
-
-static inline unsigned long read_cr8(void)
-{
-	unsigned long cr8;
-	asm volatile("movq %%cr8,%0" : "=r" (cr8));
-	return cr8;
-}
-
-static inline void write_cr8(unsigned long val)
-{
-	asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
-}
-
-#include <linux/irqflags.h>
-
-#endif /* _ASM_X86_SYSTEM_64_H */
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 69fd72aa5594..68d1537b8c81 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -12,10 +12,8 @@
 #include <linux/cpumask.h>
 #include <asm/segment.h>
 #include <asm/desc.h>
-
-#ifdef CONFIG_X86_32
 #include <asm/pgtable.h>
-#endif
+#include <asm/cacheflush.h>
 
 #include "realmode/wakeup.h"
 #include "sleep.h"
@@ -100,7 +98,7 @@ int acpi_save_state_mem(void)
 #else /* CONFIG_64BIT */
 	header->trampoline_segment = setup_trampoline() >> 4;
 #ifdef CONFIG_SMP
-	stack_start.sp = temp_stack + sizeof(temp_stack);
+	stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
 	early_gdt_descr.address =
 			(unsigned long)get_cpu_gdt_table(smp_processor_id());
 	initial_gs = per_cpu_offset(smp_processor_id());
@@ -149,6 +147,15 @@ void __init acpi_reserve_wakeup_memory(void)
 	memblock_x86_reserve_range(mem, mem + WAKEUP_SIZE, "ACPI WAKEUP");
 }
 
+int __init acpi_configure_wakeup_memory(void)
+{
+	if (acpi_realmode)
+		set_memory_x(acpi_realmode, WAKEUP_SIZE >> PAGE_SHIFT);
+
+	return 0;
+}
+arch_initcall(acpi_configure_wakeup_memory);
+
 
 static int __init acpi_sleep_setup(char *str)
 {
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 7283e98deaae..ec2c19a7b8ef 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -45,6 +45,7 @@ static const struct _cache_table __cpuinitconst cache_table[] =
 	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
 	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
 	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
+	{ 0x0e, LVL_1_DATA, 24 },	/* 6-way set assoc, 64 byte line size */
 	{ 0x21, LVL_2, 256 },	/* 8-way set assoc, 64 byte line size */
 	{ 0x22, LVL_3, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
 	{ 0x23, LVL_3, MB(1) },	/* 8-way set assoc, sectored cache, 64 byte line size */
@@ -66,6 +67,7 @@ static const struct _cache_table __cpuinitconst cache_table[] =
 	{ 0x45, LVL_2, MB(2) },	/* 4-way set assoc, 32 byte line size */
 	{ 0x46, LVL_3, MB(4) },	/* 4-way set assoc, 64 byte line size */
 	{ 0x47, LVL_3, MB(8) },	/* 8-way set assoc, 64 byte line size */
+	{ 0x48, LVL_2, MB(3) },	/* 12-way set assoc, 64 byte line size */
 	{ 0x49, LVL_3, MB(4) },	/* 16-way set assoc, 64 byte line size */
 	{ 0x4a, LVL_3, MB(6) },	/* 12-way set assoc, 64 byte line size */
 	{ 0x4b, LVL_3, MB(8) },	/* 16-way set assoc, 64 byte line size */
@@ -87,6 +89,7 @@ static const struct _cache_table __cpuinitconst cache_table[] =
 	{ 0x7c, LVL_2, MB(1) },	/* 8-way set assoc, sectored cache, 64 byte line size */
 	{ 0x7d, LVL_2, MB(2) },	/* 8-way set assoc, 64 byte line size */
 	{ 0x7f, LVL_2, 512 },	/* 2-way set assoc, 64 byte line size */
+	{ 0x80, LVL_2, 512 },	/* 8-way set assoc, 64 byte line size */
 	{ 0x82, LVL_2, 256 },	/* 8-way set assoc, 32 byte line size */
 	{ 0x83, LVL_2, 512 },	/* 8-way set assoc, 32 byte line size */
 	{ 0x84, LVL_2, MB(1) },	/* 8-way set assoc, 32 byte line size */
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index e12246ff5aa6..6f8c5e9da97f 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -59,6 +59,7 @@ struct thermal_state {
 
 /* Callback to handle core threshold interrupts */
 int (*platform_thermal_notify)(__u64 msr_val);
+EXPORT_SYMBOL(platform_thermal_notify);
 
 static DEFINE_PER_CPU(struct thermal_state, thermal_state);
 
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 01c0f3ee6cc3..bebabec5b448 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -793,13 +793,21 @@ void set_mtrr_aps_delayed_init(void)
 }
 
 /*
- * MTRR initialization for all AP's
+ * Delayed MTRR initialization for all AP's
  */
 void mtrr_aps_init(void)
 {
 	if (!use_intel())
 		return;
 
+	/*
+	 * Check if someone has requested the delay of AP MTRR initialization,
+	 * by doing set_mtrr_aps_delayed_init(), prior to this point. If not,
+	 * then we are done.
+	 */
+	if (!mtrr_aps_delayed_init)
+		return;
+
 	set_mtrr(~0U, 0, 0, 0);
 	mtrr_aps_delayed_init = false;
 }
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index e56b9bfbabd1..f7a0993c1e7c 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -682,7 +682,7 @@ static int p4_validate_raw_event(struct perf_event *event)
 	 * if an event is shared accross the logical threads
 	 * the user needs special permissions to be able to use it
 	 */
-	if (p4_event_bind_map[v].shared) {
+	if (p4_ht_active() && p4_event_bind_map[v].shared) {
 		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
 			return -EACCES;
 	}
@@ -727,7 +727,8 @@ static int p4_hw_config(struct perf_event *event)
 		event->hw.config = p4_set_ht_bit(event->hw.config);
 
 	if (event->attr.type == PERF_TYPE_RAW) {
-
+		struct p4_event_bind *bind;
+		unsigned int esel;
 		/*
 		 * Clear bits we reserve to be managed by kernel itself
 		 * and never allowed from a user space
@@ -743,6 +744,13 @@ static int p4_hw_config(struct perf_event *event)
 		 * bits since we keep additional info here (for cache events and etc)
 		 */
 		event->hw.config |= event->attr.config;
+		bind = p4_config_get_bind(event->attr.config);
+		if (!bind) {
+			rc = -EINVAL;
+			goto out;
+		}
+		esel = P4_OPCODE_ESEL(bind->opcode);
+		event->hw.config |= p4_config_pack_cccr(P4_CCCR_ESEL(esel));
 	}
 
 	rc = x86_setup_perfctr(event);
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 64101335de19..a6b6fcf7f0ae 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -149,13 +149,13 @@ void dump_trace(struct task_struct *task,
 	unsigned used = 0;
 	struct thread_info *tinfo;
 	int graph = 0;
+	unsigned long dummy;
 	unsigned long bp;
 
 	if (!task)
 		task = current;
 
 	if (!stack) {
-		unsigned long dummy;
 		stack = &dummy;
 		if (task && task != current)
 			stack = (unsigned long *)task->thread.sp;
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index fc293dc8dc35..767d6c43de37 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -85,6 +85,8 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
  */
 __HEAD
 ENTRY(startup_32)
+	movl pa(stack_start),%ecx
+
 	/* test KEEP_SEGMENTS flag to see if the bootloader is asking
 	   us to not reload segments */
 	testb $(1<<6), BP_loadflags(%esi)
@@ -99,7 +101,9 @@ ENTRY(startup_32)
 	movl %eax,%es
 	movl %eax,%fs
 	movl %eax,%gs
+	movl %eax,%ss
 2:
+	leal -__PAGE_OFFSET(%ecx),%esp
 
 /*
  * Clear BSS first so that there are no surprises...
@@ -145,8 +149,6 @@ ENTRY(startup_32)
  * _brk_end is set up to point to the first "safe" location.
  * Mappings are created both at virtual address 0 (identity mapping)
  * and PAGE_OFFSET for up to _end.
- *
- * Note that the stack is not yet set up!
  */
 #ifdef CONFIG_X86_PAE
 
@@ -282,6 +284,9 @@ ENTRY(startup_32_smp)
 	movl %eax,%es
 	movl %eax,%fs
 	movl %eax,%gs
+	movl pa(stack_start),%ecx
+	movl %eax,%ss
+	leal -__PAGE_OFFSET(%ecx),%esp
 #endif /* CONFIG_SMP */
 default_entry:
 
@@ -347,8 +352,8 @@ default_entry:
 	movl %eax,%cr0		/* ..and set paging (PG) bit */
 	ljmp $__BOOT_CS,$1f	/* Clear prefetch and normalize %eip */
 1:
-	/* Set up the stack pointer */
-	lss stack_start,%esp
+	/* Shift the stack pointer to a virtual address */
+	addl $__PAGE_OFFSET, %esp
 
 /*
  * Initialize eflags. Some BIOS's leave bits like NT set. This would
@@ -360,9 +365,7 @@ default_entry:
 
 #ifdef CONFIG_SMP
 	cmpb $0, ready
-	jz 1f			/* Initial CPU cleans BSS */
-	jmp checkCPUtype
-1:
+	jnz checkCPUtype
 #endif /* CONFIG_SMP */
 
 /*
@@ -470,14 +473,7 @@ is386: movl $2,%ecx # set MP
 
 	cld			# gcc2 wants the direction flag cleared at all times
 	pushl $0		# fake return address for unwinder
-#ifdef CONFIG_SMP
-	movb ready, %cl
 	movb $1, ready
-	cmpb $0,%cl		# the first CPU calls start_kernel
-	je 1f
-	movl (stack_start), %esp
-1:
-#endif /* CONFIG_SMP */
 	jmp *(initial_code)
 
 /*
@@ -670,15 +666,15 @@ ENTRY(initial_page_table)
 #endif
 
 .data
+.balign 4
 ENTRY(stack_start)
 	.long init_thread_union+THREAD_SIZE
-	.long __BOOT_DS
-
-ready:	.byte 0
 
 early_recursion_flag:
 	.long 0
 
+ready:	.byte 0
+
 int_msg:
 	.asciz "Unknown interrupt or fault at: %p %p %p\n"
 
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index d8286ed54ffa..e764fc05d700 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -14,6 +14,7 @@
 #include <linux/utsname.h>
 #include <trace/events/power.h>
 #include <linux/hw_breakpoint.h>
+#include <asm/cpu.h>
 #include <asm/system.h>
 #include <asm/apic.h>
 #include <asm/syscalls.h>
@@ -505,7 +506,7 @@ static void poll_idle(void)
 #define MWAIT_ECX_EXTENDED_INFO		0x01
 #define MWAIT_EDX_C1			0xf0
 
-static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
+int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
 {
 	u32 eax, ebx, ecx, edx;
 
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 763df77343dd..03273b6c272c 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -638,7 +638,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
 	 * target processor state.
 	 */
 	startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
-			 (unsigned long)stack_start.sp);
+			 stack_start);
 
 	/*
 	 * Run STARTUP IPI loop.
@@ -785,7 +785,7 @@ do_rest:
 #endif
 	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
 	initial_code = (unsigned long)start_secondary;
-	stack_start.sp = (void *) c_idle.idle->thread.sp;
+	stack_start = c_idle.idle->thread.sp;
 
 	/* start_ip had better be page-aligned! */
 	start_ip = setup_trampoline();
@@ -1402,8 +1402,9 @@ static inline void mwait_play_dead(void)
 	unsigned int highest_subcstate = 0;
 	int i;
 	void *mwait_ptr;
+	struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info);
 
-	if (!cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_MWAIT))
+	if (!(cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)))
 		return;
 	if (!cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLSH))
 		return;
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index b34ab80fddd5..bf4700755184 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -34,9 +34,11 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)
 #ifdef CONFIG_X86_32
 OUTPUT_ARCH(i386)
 ENTRY(phys_startup_32)
+jiffies = jiffies_64;
 #else
 OUTPUT_ARCH(i386:x86-64)
 ENTRY(phys_startup_64)
+jiffies_64 = jiffies;
 #endif
 
 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
@@ -140,15 +142,6 @@ SECTIONS
 	CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
 
 	DATA_DATA
-	/*
-	 * Workaround a binutils (2.20.51.0.12 to 2.21.51.0.3) bug.
-	 * This makes jiffies relocatable in such binutils
-	 */
-#ifdef CONFIG_X86_32
-	jiffies = jiffies_64;
-#else
-	jiffies_64 = jiffies;
-#endif
 	CONSTRUCTORS
 
 	/* rarely changed data like cpu maps */
diff --git a/arch/x86/lguest/Kconfig b/arch/x86/lguest/Kconfig
index 38718041efc3..6e121a2a49e1 100644
--- a/arch/x86/lguest/Kconfig
+++ b/arch/x86/lguest/Kconfig
@@ -2,6 +2,7 @@ config LGUEST_GUEST
2 bool "Lguest guest support" 2 bool "Lguest guest support"
3 select PARAVIRT 3 select PARAVIRT
4 depends on X86_32 4 depends on X86_32
5 select VIRTUALIZATION
5 select VIRTIO 6 select VIRTIO
6 select VIRTIO_RING 7 select VIRTIO_RING
7 select VIRTIO_CONSOLE 8 select VIRTIO_CONSOLE
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 4996cf5f73a0..eba687f0cc0c 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -824,7 +824,7 @@ static void __init lguest_init_IRQ(void)
 
 	for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
 		/* Some systems map "vectors" to interrupts weirdly. Not us! */
-		__get_cpu_var(vector_irq)[i] = i - FIRST_EXTERNAL_VECTOR;
+		__this_cpu_write(vector_irq[i], i - FIRST_EXTERNAL_VECTOR);
 		if (i != SYSCALL_VECTOR)
 			set_intr_gate(i, interrupt[i - FIRST_EXTERNAL_VECTOR]);
 	}
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 787c52ca49c3..ebf6d7887a38 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -2,6 +2,28 @@
 #include <linux/topology.h>
 #include <linux/module.h>
 #include <linux/bootmem.h>
+#include <asm/numa.h>
+#include <asm/acpi.h>
+
+int __initdata numa_off;
+
+static __init int numa_setup(char *opt)
+{
+	if (!opt)
+		return -EINVAL;
+	if (!strncmp(opt, "off", 3))
+		numa_off = 1;
+#ifdef CONFIG_NUMA_EMU
+	if (!strncmp(opt, "fake=", 5))
+		numa_emu_cmdline(opt + 5);
+#endif
+#ifdef CONFIG_ACPI_NUMA
+	if (!strncmp(opt, "noacpi", 6))
+		acpi_numa = -1;
+#endif
+	return 0;
+}
+early_param("numa", numa_setup);
 
 /*
  * Which logical CPUs are on which nodes
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 1e72102e80c9..95ea1551eebc 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -30,7 +30,6 @@ s16 apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
 	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
 };
 
-int numa_off __initdata;
 static unsigned long __initdata nodemap_addr;
 static unsigned long __initdata nodemap_size;
 
@@ -263,6 +262,11 @@ static struct bootnode nodes[MAX_NUMNODES] __initdata;
 static struct bootnode physnodes[MAX_NUMNODES] __cpuinitdata;
 static char *cmdline __initdata;
 
+void __init numa_emu_cmdline(char *str)
+{
+	cmdline = str;
+}
+
 static int __init setup_physnodes(unsigned long start, unsigned long end,
 				  int acpi, int amd)
 {
@@ -670,24 +674,6 @@ unsigned long __init numa_free_all_bootmem(void)
 	return pages;
 }
 
-static __init int numa_setup(char *opt)
-{
-	if (!opt)
-		return -EINVAL;
-	if (!strncmp(opt, "off", 3))
-		numa_off = 1;
-#ifdef CONFIG_NUMA_EMU
-	if (!strncmp(opt, "fake=", 5))
-		cmdline = opt + 5;
-#endif
-#ifdef CONFIG_ACPI_NUMA
-	if (!strncmp(opt, "noacpi", 6))
-		acpi_numa = -1;
-#endif
-	return 0;
-}
-early_param("numa", numa_setup);
-
 #ifdef CONFIG_NUMA
 
 static __init int find_near_online_node(int node)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 8b830ca14ac4..d343b3c81f3c 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -256,7 +256,6 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
 				   unsigned long pfn)
 {
 	pgprot_t forbidden = __pgprot(0);
-	pgprot_t required = __pgprot(0);
 
 	/*
 	 * The BIOS area between 640k and 1Mb needs to be executable for
@@ -282,12 +281,6 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
 	if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
 		   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
 		pgprot_val(forbidden) |= _PAGE_RW;
-	/*
-	 * .data and .bss should always be writable.
-	 */
-	if (within(address, (unsigned long)_sdata, (unsigned long)_edata) ||
-	    within(address, (unsigned long)__bss_start, (unsigned long)__bss_stop))
-		pgprot_val(required) |= _PAGE_RW;
 
 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
 	/*
@@ -327,7 +320,6 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
 #endif
 
 	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
-	prot = __pgprot(pgprot_val(prot) | pgprot_val(required));
 
 	return prot;
 }
diff --git a/arch/x86/mm/srat_32.c b/arch/x86/mm/srat_32.c
index f16434568a51..ae96e7b8051d 100644
--- a/arch/x86/mm/srat_32.c
+++ b/arch/x86/mm/srat_32.c
@@ -59,7 +59,6 @@ static struct node_memory_chunk_s __initdata node_memory_chunk[MAXCHUNKS];
 static int __initdata num_memory_chunks; /* total number of memory chunks */
 static u8 __initdata apicid_to_pxm[MAX_APICID];
 
-int numa_off __initdata;
 int acpi_numa __initdata;
 
 static __init void bad_srat(void)
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 7e8d3bc80af6..50542efe45fb 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1194,7 +1194,7 @@ asmlinkage void __init xen_start_kernel(void)
 	per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
 
 	local_irq_disable();
-	early_boot_irqs_off();
+	early_boot_irqs_disabled = true;
 
 	memblock_init();
 
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index 9d30105a0c4a..6a6fe8939645 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -126,7 +126,7 @@ static const struct pv_irq_ops xen_irq_ops __initdata = {
 #endif
 };
 
-void __init xen_init_irq_ops()
+void __init xen_init_irq_ops(void)
 {
 	pv_irq_ops = xen_irq_ops;
 	x86_init.irqs.intr_init = xen_init_IRQ;
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 8f2251d2a3f8..fd12d7ce7ff9 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -237,6 +237,18 @@ void __init xen_build_dynamic_phys_to_machine(void)
 			p2m_top[topidx] = mid;
 		}
 
+		/*
+		 * As long as the mfn_list has enough entries to completely
+		 * fill a p2m page, pointing into the array is ok. But if
+		 * not the entries beyond the last pfn will be undefined.
+		 */
+		if (unlikely(pfn + P2M_PER_PAGE > max_pfn)) {
+			unsigned long p2midx;
+
+			p2midx = max_pfn % P2M_PER_PAGE;
+			for ( ; p2midx < P2M_PER_PAGE; p2midx++)
+				mfn_list[pfn + p2midx] = INVALID_P2M_ENTRY;
+		}
 		p2m_top[topidx][mididx] = &mfn_list[pfn];
 	}
 
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index b5a7f928234b..a8a66a50d446 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -179,8 +179,13 @@ char * __init xen_memory_setup(void)
 	e820.nr_map = 0;
 	xen_extra_mem_start = mem_end;
 	for (i = 0; i < memmap.nr_entries; i++) {
-		unsigned long long end = map[i].addr + map[i].size;
+		unsigned long long end;
 
+		/* Guard against non-page aligned E820 entries. */
+		if (map[i].type == E820_RAM)
+			map[i].size -= (map[i].size + map[i].addr) % PAGE_SIZE;
+
+		end = map[i].addr + map[i].size;
 		if (map[i].type == E820_RAM && end > mem_end) {
 			/* RAM off the end - may be partially included */
 			u64 delta = min(map[i].size, end - mem_end);
@@ -350,6 +355,7 @@ void __init xen_arch_setup(void)
 	boot_cpu_data.hlt_works_ok = 1;
 #endif
 	pm_idle = default_idle;
+	boot_option_idle_override = IDLE_HALT;
 
 	fiddle_vdso();
 }