author    Ingo Molnar <mingo@elte.hu>  2011-02-02 01:10:03 -0500
committer Ingo Molnar <mingo@elte.hu>  2011-02-02 01:10:06 -0500
commit    8104a4775ad8a7863af0b898224b15aa708582db (patch)
tree      71380e4db36e9d91e521d0a98be74981e32a6700 /arch/x86
parent    f6bbc1daac964da551130dbf01809d3fbd178b2d (diff)
parent    ebf53826e105f488f4f628703a108e98940d1dc5 (diff)

Merge commit 'v2.6.38-rc3' into perf/core

Merge reason: Pick up latest fixes.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                          |  20
-rw-r--r--  arch/x86/Kconfig.cpu                      |   2
-rw-r--r--  arch/x86/Kconfig.debug                    |   4
-rw-r--r--  arch/x86/include/asm/cacheflush.h         |  42
-rw-r--r--  arch/x86/include/asm/cpu.h                |   1
-rw-r--r--  arch/x86/include/asm/jump_label.h         |   2
-rw-r--r--  arch/x86/include/asm/numa_32.h            |   2
-rw-r--r--  arch/x86/include/asm/numa_64.h            |   1
-rw-r--r--  arch/x86/include/asm/paravirt.h           |   5
-rw-r--r--  arch/x86/include/asm/percpu.h             |  32
-rw-r--r--  arch/x86/include/asm/system_64.h          |  22
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c     |   3
-rw-r--r--  arch/x86/kernel/cpu/mcheck/therm_throt.c  |   1
-rw-r--r--  arch/x86/kernel/dumpstack_64.c            |   2
-rw-r--r--  arch/x86/kernel/process.c                 |   3
-rw-r--r--  arch/x86/kernel/smpboot.c                 |   3
-rw-r--r--  arch/x86/kernel/vmlinux.lds.S             |  11
-rw-r--r--  arch/x86/lguest/Kconfig                   |   1
-rw-r--r--  arch/x86/lguest/boot.c                    |   2
-rw-r--r--  arch/x86/mm/numa.c                        |  22
-rw-r--r--  arch/x86/mm/numa_64.c                     |  24
-rw-r--r--  arch/x86/mm/srat_32.c                     |   1
-rw-r--r--  arch/x86/xen/enlighten.c                  |   2
-rw-r--r--  arch/x86/xen/irq.c                        |   2
-rw-r--r--  arch/x86/xen/p2m.c                        |  12
-rw-r--r--  arch/x86/xen/setup.c                      |   8

26 files changed, 98 insertions(+), 132 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 3ed5ad92b029..d5ed94d30aad 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -627,11 +627,11 @@ config APB_TIMER
 	  as it is off-chip. APB timers are always running regardless of CPU
 	  C states, they are used as per CPU clockevent device when possible.
 
-# Mark as embedded because too many people got it wrong.
+# Mark as expert because too many people got it wrong.
 # The code disables itself when not needed.
 config DMI
 	default y
-	bool "Enable DMI scanning" if EMBEDDED
+	bool "Enable DMI scanning" if EXPERT
 	---help---
 	  Enabled scanning of DMI to identify machine quirks. Say Y
 	  here unless you have verified that your setup is not
@@ -639,7 +639,7 @@ config DMI
 	  BIOS code.
 
 config GART_IOMMU
-	bool "GART IOMMU support" if EMBEDDED
+	bool "GART IOMMU support" if EXPERT
 	default y
 	select SWIOTLB
 	depends on X86_64 && PCI && AMD_NB
@@ -889,7 +889,7 @@ config X86_THERMAL_VECTOR
 	depends on X86_MCE_INTEL
 
 config VM86
-	bool "Enable VM86 support" if EMBEDDED
+	bool "Enable VM86 support" if EXPERT
 	default y
 	depends on X86_32
 	---help---
@@ -1073,7 +1073,7 @@ endchoice
 
 choice
 	depends on EXPERIMENTAL
-	prompt "Memory split" if EMBEDDED
+	prompt "Memory split" if EXPERT
 	default VMSPLIT_3G
 	depends on X86_32
 	---help---
@@ -1135,7 +1135,7 @@ config ARCH_DMA_ADDR_T_64BIT
 	def_bool X86_64 || HIGHMEM64G
 
 config DIRECT_GBPAGES
-	bool "Enable 1GB pages for kernel pagetables" if EMBEDDED
+	bool "Enable 1GB pages for kernel pagetables" if EXPERT
 	default y
 	depends on X86_64
 	---help---
@@ -1369,7 +1369,7 @@ config MATH_EMULATION
 
 config MTRR
 	def_bool y
-	prompt "MTRR (Memory Type Range Register) support" if EMBEDDED
+	prompt "MTRR (Memory Type Range Register) support" if EXPERT
 	---help---
 	  On Intel P6 family processors (Pentium Pro, Pentium II and later)
 	  the Memory Type Range Registers (MTRRs) may be used to control
@@ -1435,7 +1435,7 @@ config MTRR_SANITIZER_SPARE_REG_NR_DEFAULT
 
 config X86_PAT
 	def_bool y
-	prompt "x86 PAT support" if EMBEDDED
+	prompt "x86 PAT support" if EXPERT
 	depends on MTRR
 	---help---
 	  Use PAT attributes to setup page level cache control.
@@ -1539,7 +1539,7 @@ config KEXEC_JUMP
 	  code in physical address mode via KEXEC
 
 config PHYSICAL_START
-	hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
+	hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
 	default "0x1000000"
 	---help---
 	  This gives the physical address where the kernel is loaded.
@@ -1934,7 +1934,7 @@ config PCI_MMCONFIG
 	depends on X86_64 && PCI && ACPI
 
 config PCI_CNB20LE_QUIRK
-	bool "Read CNB20LE Host Bridge Windows" if EMBEDDED
+	bool "Read CNB20LE Host Bridge Windows" if EXPERT
 	default n
 	depends on PCI && EXPERIMENTAL
 	help
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 15588a0ef466..283c5a6a03a6 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -424,7 +424,7 @@ config X86_DEBUGCTLMSR
 	depends on !(MK6 || MWINCHIPC6 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 || M486 || M386) && !UML
 
 menuconfig PROCESSOR_SELECT
-	bool "Supported processor vendors" if EMBEDDED
+	bool "Supported processor vendors" if EXPERT
 	---help---
 	  This lets you choose what x86 vendor support code your kernel
 	  will include.
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 45143bbcfe5e..615e18810f48 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -31,7 +31,7 @@ config X86_VERBOSE_BOOTUP
 	  see errors. Disable this if you want silent bootup.
 
 config EARLY_PRINTK
-	bool "Early printk" if EMBEDDED
+	bool "Early printk" if EXPERT
 	default y
 	---help---
 	  Write kernel log output directly into the VGA buffer or to a serial
@@ -138,7 +138,7 @@ config DEBUG_NX_TEST
 
 config DOUBLEFAULT
 	default y
-	bool "Enable doublefault exception handler" if EMBEDDED
+	bool "Enable doublefault exception handler" if EXPERT
 	depends on X86_32
 	---help---
 	  This option allows trapping of rare doublefault exceptions that
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 63e35ec9075c..62f084478f7e 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -1,48 +1,8 @@
 #ifndef _ASM_X86_CACHEFLUSH_H
 #define _ASM_X86_CACHEFLUSH_H
 
-/* Keep includes the same across arches. */
-#include <linux/mm.h>
-
 /* Caches aren't brain-dead on the intel. */
-static inline void flush_cache_all(void) { }
-static inline void flush_cache_mm(struct mm_struct *mm) { }
-static inline void flush_cache_dup_mm(struct mm_struct *mm) { }
-static inline void flush_cache_range(struct vm_area_struct *vma,
-				     unsigned long start, unsigned long end) { }
-static inline void flush_cache_page(struct vm_area_struct *vma,
-				    unsigned long vmaddr, unsigned long pfn) { }
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-static inline void flush_dcache_page(struct page *page) { }
-static inline void flush_dcache_mmap_lock(struct address_space *mapping) { }
-static inline void flush_dcache_mmap_unlock(struct address_space *mapping) { }
-static inline void flush_icache_range(unsigned long start,
-				      unsigned long end) { }
-static inline void flush_icache_page(struct vm_area_struct *vma,
-				     struct page *page) { }
-static inline void flush_icache_user_range(struct vm_area_struct *vma,
-					   struct page *page,
-					   unsigned long addr,
-					   unsigned long len) { }
-static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
-static inline void flush_cache_vunmap(unsigned long start,
-				      unsigned long end) { }
-
-static inline void copy_to_user_page(struct vm_area_struct *vma,
-				     struct page *page, unsigned long vaddr,
-				     void *dst, const void *src,
-				     unsigned long len)
-{
-	memcpy(dst, src, len);
-}
-
-static inline void copy_from_user_page(struct vm_area_struct *vma,
-				       struct page *page, unsigned long vaddr,
-				       void *dst, const void *src,
-				       unsigned long len)
-{
-	memcpy(dst, src, len);
-}
+#include <asm-generic/cacheflush.h>
 
 #ifdef CONFIG_X86_PAT
 /*
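
[Note] On x86 the caches are coherent, so every one of these flush hooks is a no-op; that is why the whole hand-written block collapses into a single include. As a rough, non-verbatim sketch of the do-nothing fallbacks that asm-generic/cacheflush.h supplies (the exact spelling in the header may differ):

	/* no-op fallbacks, sketched; see include/asm-generic/cacheflush.h */
	#define flush_cache_all()		do { } while (0)
	#define flush_dcache_page(page)		do { } while (0)
	#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
		memcpy(dst, src, len)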
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index 4fab24de26b1..6e6e7558e702 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -32,5 +32,6 @@ extern void arch_unregister_cpu(int);
 
 DECLARE_PER_CPU(int, cpu_state);
 
+int __cpuinit mwait_usable(const struct cpuinfo_x86 *);
 
 #endif /* _ASM_X86_CPU_H */
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index f52d42e80585..574dbc22893a 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -14,7 +14,7 @@
 do {							\
 	asm goto("1:"					\
 		JUMP_LABEL_INITIAL_NOP			\
-		".pushsection __jump_table, \"a\" \n\t"	\
+		".pushsection __jump_table, \"aw\" \n\t"\
 		_ASM_PTR "1b, %l[" #label "], %c0 \n\t"	\
 		".popsection \n\t"			\
 		: : "i" (key) : : label);		\
diff --git a/arch/x86/include/asm/numa_32.h b/arch/x86/include/asm/numa_32.h
index a37229011b56..b0ef2b449a9d 100644
--- a/arch/x86/include/asm/numa_32.h
+++ b/arch/x86/include/asm/numa_32.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_NUMA_32_H
 #define _ASM_X86_NUMA_32_H
 
+extern int numa_off;
+
 extern int pxm_to_nid(int pxm);
 extern void numa_remove_cpu(int cpu);
 
diff --git a/arch/x86/include/asm/numa_64.h b/arch/x86/include/asm/numa_64.h
index 5ae87285a502..0493be39607c 100644
--- a/arch/x86/include/asm/numa_64.h
+++ b/arch/x86/include/asm/numa_64.h
@@ -40,6 +40,7 @@ extern void __cpuinit numa_remove_cpu(int cpu);
 #ifdef CONFIG_NUMA_EMU
 #define FAKE_NODE_MIN_SIZE	((u64)32 << 20)
 #define FAKE_NODE_MIN_HASH_MASK	(~(FAKE_NODE_MIN_SIZE - 1UL))
+void numa_emu_cmdline(char *);
 #endif /* CONFIG_NUMA_EMU */
 #else
 static inline void init_cpu_to_node(void)	{ }
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 2071a8b2b32f..ebbc4d8ab170 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -558,13 +558,12 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 			      pmd_t *pmdp, pmd_t pmd)
 {
-#if PAGETABLE_LEVELS >= 3
 	if (sizeof(pmdval_t) > sizeof(long))
 		/* 5 arg words */
 		pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd);
 	else
-		PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp, pmd.pmd);
-#endif
+		PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp,
+			    native_pmd_val(pmd));
 }
 #endif
 
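
[Note] Two fixes in one hunk: the #if PAGETABLE_LEVELS >= 3 guard was redundant (this variant of set_pmd_at() is only built where PMDs exist), and the direct pmd.pmd member access is replaced by the native_pmd_val() accessor, which works across pagetable layouts. The accessor pattern, in a simplified sketch (the real pmdval_t varies with PAE/64-bit):

	typedef unsigned long pmdval_t;			/* simplified */
	typedef struct { pmdval_t pmd; } pmd_t;		/* type-safe wrapper */

	static inline pmdval_t native_pmd_val(pmd_t pmd)
	{
		return pmd.pmd;	/* the one place that knows the layout */
	}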
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 8ee45167e817..7e172955ee57 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -273,34 +273,34 @@ do { \
 	typeof(var) pxo_new__ = (nval);				\
 	switch (sizeof(var)) {					\
 	case 1:							\
-		asm("\n1:mov "__percpu_arg(1)",%%al"		\
-		    "\n\tcmpxchgb %2, "__percpu_arg(1)		\
+		asm("\n\tmov "__percpu_arg(1)",%%al"		\
+		    "\n1:\tcmpxchgb %2, "__percpu_arg(1)	\
 		    "\n\tjnz 1b"				\
-		    : "=a" (pxo_ret__), "+m" (var)		\
+		    : "=&a" (pxo_ret__), "+m" (var)		\
 		    : "q" (pxo_new__)				\
 		    : "memory");				\
 		break;						\
 	case 2:							\
-		asm("\n1:mov "__percpu_arg(1)",%%ax"		\
-		    "\n\tcmpxchgw %2, "__percpu_arg(1)		\
+		asm("\n\tmov "__percpu_arg(1)",%%ax"		\
+		    "\n1:\tcmpxchgw %2, "__percpu_arg(1)	\
 		    "\n\tjnz 1b"				\
-		    : "=a" (pxo_ret__), "+m" (var)		\
+		    : "=&a" (pxo_ret__), "+m" (var)		\
 		    : "r" (pxo_new__)				\
 		    : "memory");				\
 		break;						\
 	case 4:							\
-		asm("\n1:mov "__percpu_arg(1)",%%eax"		\
-		    "\n\tcmpxchgl %2, "__percpu_arg(1)		\
+		asm("\n\tmov "__percpu_arg(1)",%%eax"		\
+		    "\n1:\tcmpxchgl %2, "__percpu_arg(1)	\
 		    "\n\tjnz 1b"				\
-		    : "=a" (pxo_ret__), "+m" (var)		\
+		    : "=&a" (pxo_ret__), "+m" (var)		\
 		    : "r" (pxo_new__)				\
 		    : "memory");				\
 		break;						\
 	case 8:							\
-		asm("\n1:mov "__percpu_arg(1)",%%rax"		\
-		    "\n\tcmpxchgq %2, "__percpu_arg(1)		\
+		asm("\n\tmov "__percpu_arg(1)",%%rax"		\
+		    "\n1:\tcmpxchgq %2, "__percpu_arg(1)	\
 		    "\n\tjnz 1b"				\
-		    : "=a" (pxo_ret__), "+m" (var)		\
+		    : "=&a" (pxo_ret__), "+m" (var)		\
 		    : "r" (pxo_new__)				\
 		    : "memory");				\
 		break;						\
@@ -414,8 +414,6 @@ do {								\
 #define this_cpu_xchg_1(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define this_cpu_xchg_2(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define this_cpu_xchg_4(pcp, nval)	percpu_xchg_op(pcp, nval)
-#define this_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
-#define this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 
 #define irqsafe_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
 #define irqsafe_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
@@ -432,8 +430,6 @@ do {								\
 #define irqsafe_cpu_xchg_1(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define irqsafe_cpu_xchg_2(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define irqsafe_cpu_xchg_4(pcp, nval)	percpu_xchg_op(pcp, nval)
-#define irqsafe_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
-#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 
 #ifndef CONFIG_M386
 #define __this_cpu_add_return_1(pcp, val)	percpu_add_return_op(pcp, val)
@@ -475,11 +471,15 @@ do {								\
 #define this_cpu_or_8(pcp, val)		percpu_to_op("or", (pcp), val)
 #define this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define this_cpu_add_return_8(pcp, val)	percpu_add_return_op(pcp, val)
+#define this_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
+#define this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 
 #define irqsafe_cpu_add_8(pcp, val)	percpu_add_op((pcp), val)
 #define irqsafe_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
 #define irqsafe_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
 #define irqsafe_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define irqsafe_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
+#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 #endif
 
 /* This is not atomic against other CPUs -- CPU preemption needs to be off */
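
[Note] The percpu_xchg_op() fix is twofold: the retry label 1: moves from the initial load to the cmpxchg itself, since a failed cmpxchg already refreshes the accumulator with the current value (re-running the mov was wasted work), and the output constraint gains an earlyclobber ("=&a") so the compiler can never allocate the new-value input into the register the asm writes first. The same retry structure in plain C, as a hypothetical user-space sketch built on a GCC builtin rather than the kernel macro:

	/* Hypothetical sketch of xchg-via-cmpxchg, not the kernel macro. */
	static unsigned long xchg_ulong(unsigned long *ptr, unsigned long new)
	{
		unsigned long old = *ptr;	/* the initial "mov" */
		unsigned long seen;

		/* A failed CAS returns the value it found, so the retry
		 * resumes at the compare-and-swap, not at the load. */
		while ((seen = __sync_val_compare_and_swap(ptr, old, new)) != old)
			old = seen;
		return old;			/* previous value */
	}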
diff --git a/arch/x86/include/asm/system_64.h b/arch/x86/include/asm/system_64.h
deleted file mode 100644
index 1159e091ad09..000000000000
--- a/arch/x86/include/asm/system_64.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef _ASM_X86_SYSTEM_64_H
-#define _ASM_X86_SYSTEM_64_H
-
-#include <asm/segment.h>
-#include <asm/cmpxchg.h>
-
-
-static inline unsigned long read_cr8(void)
-{
-	unsigned long cr8;
-	asm volatile("movq %%cr8,%0" : "=r" (cr8));
-	return cr8;
-}
-
-static inline void write_cr8(unsigned long val)
-{
-	asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
-}
-
-#include <linux/irqflags.h>
-
-#endif /* _ASM_X86_SYSTEM_64_H */
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 7283e98deaae..ec2c19a7b8ef 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -45,6 +45,7 @@ static const struct _cache_table __cpuinitconst cache_table[] =
 	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
 	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
 	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
+	{ 0x0e, LVL_1_DATA, 24 },	/* 6-way set assoc, 64 byte line size */
 	{ 0x21, LVL_2, 256 },	/* 8-way set assoc, 64 byte line size */
 	{ 0x22, LVL_3, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
 	{ 0x23, LVL_3, MB(1) },	/* 8-way set assoc, sectored cache, 64 byte line size */
@@ -66,6 +67,7 @@ static const struct _cache_table __cpuinitconst cache_table[] =
 	{ 0x45, LVL_2, MB(2) },	/* 4-way set assoc, 32 byte line size */
 	{ 0x46, LVL_3, MB(4) },	/* 4-way set assoc, 64 byte line size */
 	{ 0x47, LVL_3, MB(8) },	/* 8-way set assoc, 64 byte line size */
+	{ 0x48, LVL_2, MB(3) },	/* 12-way set assoc, 64 byte line size */
 	{ 0x49, LVL_3, MB(4) },	/* 16-way set assoc, 64 byte line size */
 	{ 0x4a, LVL_3, MB(6) },	/* 12-way set assoc, 64 byte line size */
 	{ 0x4b, LVL_3, MB(8) },	/* 16-way set assoc, 64 byte line size */
@@ -87,6 +89,7 @@ static const struct _cache_table __cpuinitconst cache_table[] =
 	{ 0x7c, LVL_2, MB(1) },	/* 8-way set assoc, sectored cache, 64 byte line size */
 	{ 0x7d, LVL_2, MB(2) },	/* 8-way set assoc, 64 byte line size */
 	{ 0x7f, LVL_2, 512 },	/* 2-way set assoc, 64 byte line size */
+	{ 0x80, LVL_2, 512 },	/* 8-way set assoc, 64 byte line size */
 	{ 0x82, LVL_2, 256 },	/* 8-way set assoc, 32 byte line size */
 	{ 0x83, LVL_2, 512 },	/* 8-way set assoc, 32 byte line size */
 	{ 0x84, LVL_2, MB(1) },	/* 8-way set assoc, 32 byte line size */
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index e12246ff5aa6..6f8c5e9da97f 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -59,6 +59,7 @@ struct thermal_state {
 
 /* Callback to handle core threshold interrupts */
 int (*platform_thermal_notify)(__u64 msr_val);
+EXPORT_SYMBOL(platform_thermal_notify);
 
 static DEFINE_PER_CPU(struct thermal_state, thermal_state);
 
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 64101335de19..a6b6fcf7f0ae 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -149,13 +149,13 @@ void dump_trace(struct task_struct *task,
 	unsigned used = 0;
 	struct thread_info *tinfo;
 	int graph = 0;
+	unsigned long dummy;
 	unsigned long bp;
 
 	if (!task)
 		task = current;
 
 	if (!stack) {
-		unsigned long dummy;
 		stack = &dummy;
 		if (task && task != current)
 			stack = (unsigned long *)task->thread.sp;
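
[Note] The point of hoisting dummy to function scope: stack could keep pointing at it after the if (!stack) block ended, and a pointer to a variable whose block has closed is dead the moment the compiler reuses that stack slot. The bug pattern in isolation (illustrative, not kernel code):

	void broken(void)
	{
		unsigned long *stack;

		{
			unsigned long dummy = 0;
			stack = &dummy;	/* lifetime ends at the brace below */
		}
		/* any dereference of stack here is undefined behaviour */
	}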
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index d8286ed54ffa..e764fc05d700 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -14,6 +14,7 @@
 #include <linux/utsname.h>
 #include <trace/events/power.h>
 #include <linux/hw_breakpoint.h>
+#include <asm/cpu.h>
 #include <asm/system.h>
 #include <asm/apic.h>
 #include <asm/syscalls.h>
@@ -505,7 +506,7 @@ static void poll_idle(void)
 #define MWAIT_ECX_EXTENDED_INFO		0x01
 #define MWAIT_EDX_C1			0xf0
 
-static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
+int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
 {
 	u32 eax, ebx, ecx, edx;
 
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 763df77343dd..0cbe8c0b35ed 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1402,8 +1402,9 @@ static inline void mwait_play_dead(void)
 	unsigned int highest_subcstate = 0;
 	int i;
 	void *mwait_ptr;
+	struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info);
 
-	if (!cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_MWAIT))
+	if (!(cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)))
 		return;
 	if (!cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLSH))
 		return;
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index b34ab80fddd5..bf4700755184 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -34,9 +34,11 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)
 #ifdef CONFIG_X86_32
 OUTPUT_ARCH(i386)
 ENTRY(phys_startup_32)
+jiffies = jiffies_64;
 #else
 OUTPUT_ARCH(i386:x86-64)
 ENTRY(phys_startup_64)
+jiffies_64 = jiffies;
 #endif
 
 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
@@ -140,15 +142,6 @@ SECTIONS
 	CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
 
 	DATA_DATA
-	/*
-	 * Workaround a binutils (2.20.51.0.12 to 2.21.51.0.3) bug.
-	 * This makes jiffies relocatable in such binutils
-	 */
-#ifdef CONFIG_X86_32
-	jiffies = jiffies_64;
-#else
-	jiffies_64 = jiffies;
-#endif
 	CONSTRUCTORS
 
 	/* rarely changed data like cpu maps */
diff --git a/arch/x86/lguest/Kconfig b/arch/x86/lguest/Kconfig
index 38718041efc3..6e121a2a49e1 100644
--- a/arch/x86/lguest/Kconfig
+++ b/arch/x86/lguest/Kconfig
@@ -2,6 +2,7 @@ config LGUEST_GUEST
2 bool "Lguest guest support" 2 bool "Lguest guest support"
3 select PARAVIRT 3 select PARAVIRT
4 depends on X86_32 4 depends on X86_32
5 select VIRTUALIZATION
5 select VIRTIO 6 select VIRTIO
6 select VIRTIO_RING 7 select VIRTIO_RING
7 select VIRTIO_CONSOLE 8 select VIRTIO_CONSOLE
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 4996cf5f73a0..eba687f0cc0c 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -824,7 +824,7 @@ static void __init lguest_init_IRQ(void)
 
 	for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
 		/* Some systems map "vectors" to interrupts weirdly. Not us! */
-		__get_cpu_var(vector_irq)[i] = i - FIRST_EXTERNAL_VECTOR;
+		__this_cpu_write(vector_irq[i], i - FIRST_EXTERNAL_VECTOR);
 		if (i != SYSCALL_VECTOR)
 			set_intr_gate(i, interrupt[i - FIRST_EXTERNAL_VECTOR]);
 	}
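
[Note] __this_cpu_write() is the preferred replacement for the older __get_cpu_var() assignment: on x86 it compiles to one segment-prefixed store instead of first materializing the address of this CPU's copy. A hedged sketch of the two idioms side by side, with an illustrative variable:

	DEFINE_PER_CPU(int, demo_counter);	/* illustrative percpu var */

	static void set_counter(int v)
	{
		__get_cpu_var(demo_counter) = v;	/* old: form address, store */
		__this_cpu_write(demo_counter, v);	/* new: single percpu store */
	}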
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 787c52ca49c3..ebf6d7887a38 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -2,6 +2,28 @@
 #include <linux/topology.h>
 #include <linux/module.h>
 #include <linux/bootmem.h>
+#include <asm/numa.h>
+#include <asm/acpi.h>
+
+int __initdata numa_off;
+
+static __init int numa_setup(char *opt)
+{
+	if (!opt)
+		return -EINVAL;
+	if (!strncmp(opt, "off", 3))
+		numa_off = 1;
+#ifdef CONFIG_NUMA_EMU
+	if (!strncmp(opt, "fake=", 5))
+		numa_emu_cmdline(opt + 5);
+#endif
+#ifdef CONFIG_ACPI_NUMA
+	if (!strncmp(opt, "noacpi", 6))
+		acpi_numa = -1;
+#endif
+	return 0;
+}
+early_param("numa", numa_setup);
 
 /*
  * Which logical CPUs are on which nodes
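
[Note] With numa_setup() living in the shared numa.c, the numa= option (off, fake=..., noacpi) is parsed identically on 32-bit and 64-bit, and the emulation string is handed to numa_64.c through the new numa_emu_cmdline() hook instead of a file-local cmdline. For reference, early_param() registers a parser that runs early in setup_arch(); the idiom, sketched with made-up names:

	static int quirk_on __initdata;

	static int __init quirk_setup(char *opt)	/* text after "quirk=" */
	{
		if (!opt)
			return -EINVAL;
		if (!strncmp(opt, "on", 2))
			quirk_on = 1;
		return 0;				/* 0 = consumed */
	}
	early_param("quirk", quirk_setup);		/* boot with quirk=on */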
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 1e72102e80c9..95ea1551eebc 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -30,7 +30,6 @@ s16 apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
 	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
 };
 
-int numa_off __initdata;
 static unsigned long __initdata nodemap_addr;
 static unsigned long __initdata nodemap_size;
 
@@ -263,6 +262,11 @@ static struct bootnode nodes[MAX_NUMNODES] __initdata;
 static struct bootnode physnodes[MAX_NUMNODES] __cpuinitdata;
 static char *cmdline __initdata;
 
+void __init numa_emu_cmdline(char *str)
+{
+	cmdline = str;
+}
+
 static int __init setup_physnodes(unsigned long start, unsigned long end,
 				  int acpi, int amd)
 {
@@ -670,24 +674,6 @@ unsigned long __init numa_free_all_bootmem(void)
 	return pages;
 }
 
-static __init int numa_setup(char *opt)
-{
-	if (!opt)
-		return -EINVAL;
-	if (!strncmp(opt, "off", 3))
-		numa_off = 1;
-#ifdef CONFIG_NUMA_EMU
-	if (!strncmp(opt, "fake=", 5))
-		cmdline = opt + 5;
-#endif
-#ifdef CONFIG_ACPI_NUMA
-	if (!strncmp(opt, "noacpi", 6))
-		acpi_numa = -1;
-#endif
-	return 0;
-}
-early_param("numa", numa_setup);
-
 #ifdef CONFIG_NUMA
 
 static __init int find_near_online_node(int node)
diff --git a/arch/x86/mm/srat_32.c b/arch/x86/mm/srat_32.c
index f16434568a51..ae96e7b8051d 100644
--- a/arch/x86/mm/srat_32.c
+++ b/arch/x86/mm/srat_32.c
@@ -59,7 +59,6 @@ static struct node_memory_chunk_s __initdata node_memory_chunk[MAXCHUNKS];
 static int __initdata num_memory_chunks; /* total number of memory chunks */
 static u8 __initdata apicid_to_pxm[MAX_APICID];
 
-int numa_off __initdata;
 int acpi_numa __initdata;
 
 static __init void bad_srat(void)
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 7e8d3bc80af6..50542efe45fb 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1194,7 +1194,7 @@ asmlinkage void __init xen_start_kernel(void)
 	per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
 
 	local_irq_disable();
-	early_boot_irqs_off();
+	early_boot_irqs_disabled = true;
 
 	memblock_init();
 
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index 9d30105a0c4a..6a6fe8939645 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -126,7 +126,7 @@ static const struct pv_irq_ops xen_irq_ops __initdata = {
 #endif
 };
 
-void __init xen_init_irq_ops()
+void __init xen_init_irq_ops(void)
 {
 	pv_irq_ops = xen_irq_ops;
 	x86_init.irqs.intr_init = xen_init_IRQ;
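
[Note] A one-character-class fix worth spelling out: in C, empty parentheses in a declarator mean "unspecified parameters", not "no parameters", so xen_init_irq_ops() silently accepted any arguments; (void) is the strict spelling. In miniature:

	void f();	/* unspecified parameters: calls are unchecked */
	void g(void);	/* no parameters: g(1) is a compile error */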
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 8f2251d2a3f8..fd12d7ce7ff9 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -237,6 +237,18 @@ void __init xen_build_dynamic_phys_to_machine(void)
 			p2m_top[topidx] = mid;
 		}
 
+		/*
+		 * As long as the mfn_list has enough entries to completely
+		 * fill a p2m page, pointing into the array is ok. But if
+		 * not the entries beyond the last pfn will be undefined.
+		 */
+		if (unlikely(pfn + P2M_PER_PAGE > max_pfn)) {
+			unsigned long p2midx;
+
+			p2midx = max_pfn % P2M_PER_PAGE;
+			for ( ; p2midx < P2M_PER_PAGE; p2midx++)
+				mfn_list[pfn + p2midx] = INVALID_P2M_ENTRY;
+		}
 		p2m_top[topidx][mididx] = &mfn_list[pfn];
 	}
 
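
[Note] The padding loop guards the case where mfn_list does not fill the final p2m page: the leftover slots are set to INVALID_P2M_ENTRY so a lookup past max_pfn hits a sentinel rather than whatever followed the array. The idiom in isolation, with illustrative names and constants:

	#define SLOTS_PER_PAGE	512
	#define INVALID_ENTRY	(~0UL)

	/* Mark the unused tail of the last fixed-size page as invalid. */
	static void pad_last_page(unsigned long *list, unsigned long page_base,
				  unsigned long nr_valid)
	{
		unsigned long idx = nr_valid % SLOTS_PER_PAGE;

		for (; idx < SLOTS_PER_PAGE; idx++)
			list[page_base + idx] = INVALID_ENTRY;
	}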
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index b5a7f928234b..a8a66a50d446 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -179,8 +179,13 @@ char * __init xen_memory_setup(void)
 	e820.nr_map = 0;
 	xen_extra_mem_start = mem_end;
 	for (i = 0; i < memmap.nr_entries; i++) {
-		unsigned long long end = map[i].addr + map[i].size;
+		unsigned long long end;
 
+		/* Guard against non-page aligned E820 entries. */
+		if (map[i].type == E820_RAM)
+			map[i].size -= (map[i].size + map[i].addr) % PAGE_SIZE;
+
+		end = map[i].addr + map[i].size;
 		if (map[i].type == E820_RAM && end > mem_end) {
 			/* RAM off the end - may be partially included */
 			u64 delta = min(map[i].size, end - mem_end);
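
[Note] The clamp trims a RAM entry so that addr + size ends on a page boundary; worked through with illustrative numbers:

	/* PAGE_SIZE = 0x1000:
	 *   addr = 0x100000, size = 0x12fff	(end = 0x112fff, misaligned)
	 *   (size + addr) % PAGE_SIZE = 0xfff
	 *   size -= 0xfff  ->  size = 0x12000	(end = 0x112000, aligned)
	 */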
@@ -350,6 +355,7 @@ void __init xen_arch_setup(void)
 	boot_cpu_data.hlt_works_ok = 1;
 #endif
 	pm_idle = default_idle;
+	boot_option_idle_override = IDLE_HALT;
 
 	fiddle_vdso();
 }