path: root/arch/x86
author     Ingo Molnar <mingo@elte.hu>  2009-02-11 03:22:04 -0500
committer  Ingo Molnar <mingo@elte.hu>  2009-02-11 03:22:04 -0500
commit     95fd4845ed0ffcab305b4f30ce1c12dc34f1b56c (patch)
tree       aa2aac22a5b329b778a6771a87bbf1945ad49bbd /arch/x86
parent     d278c48435625cb6b7edcf6a547620768b175709 (diff)
parent     8e4921515c1a379539607eb443d51c30f4f7f338 (diff)
Merge commit 'v2.6.29-rc4' into perfcounters/core

Conflicts:
        arch/x86/kernel/setup_percpu.c
        arch/x86/mm/fault.c
        drivers/acpi/processor_idle.c
        kernel/irq/handle.c
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/boot/video-vesa.c | 11
-rw-r--r--  arch/x86/configs/i386_defconfig | 4
-rw-r--r--  arch/x86/configs/x86_64_defconfig | 4
-rw-r--r--  arch/x86/ia32/ia32entry.S | 8
-rw-r--r--  arch/x86/include/asm/dma-mapping.h | 4
-rw-r--r--  arch/x86/include/asm/e820.h | 1
-rw-r--r--  arch/x86/include/asm/io.h | 1
-rw-r--r--  arch/x86/include/asm/kvm.h | 2
-rw-r--r--  arch/x86/include/asm/mce.h | 5
-rw-r--r--  arch/x86/include/asm/msr-index.h | 29
-rw-r--r--  arch/x86/include/asm/mtrr.h | 1
-rw-r--r--  arch/x86/include/asm/pgalloc.h | 1
-rw-r--r--  arch/x86/include/asm/ptrace-abi.h | 2
-rw-r--r--  arch/x86/include/asm/sigcontext.h | 2
-rw-r--r--  arch/x86/include/asm/sigcontext32.h | 2
-rw-r--r--  arch/x86/include/asm/swab.h | 2
-rw-r--r--  arch/x86/include/asm/syscalls.h | 2
-rw-r--r--  arch/x86/include/asm/timex.h | 13
-rw-r--r--  arch/x86/kernel/acpi/sleep.c | 4
-rw-r--r--  arch/x86/kernel/apic.c | 2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/Kconfig | 11
-rw-r--r--  arch/x86/kernel/cpu/intel.c | 13
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c | 15
-rw-r--r--  arch/x86/kernel/cpu/mtrr/generic.c | 12
-rw-r--r--  arch/x86/kernel/cpu/mtrr/main.c | 3
-rw-r--r--  arch/x86/kernel/ds.c | 31
-rw-r--r--  arch/x86/kernel/entry_64.S | 1
-rw-r--r--  arch/x86/kernel/hpet.c | 3
-rw-r--r--  arch/x86/kernel/io_apic.c | 5
-rw-r--r--  arch/x86/kernel/irqinit_32.c | 12
-rw-r--r--  arch/x86/kernel/pci-gart_64.c | 2
-rw-r--r--  arch/x86/kernel/setup_percpu.c | 2
-rw-r--r--  arch/x86/kernel/signal.c | 11
-rw-r--r--  arch/x86/kernel/tlb_uv.c | 1
-rw-r--r--  arch/x86/kernel/vmi_32.c | 2
-rw-r--r--  arch/x86/lguest/boot.c | 4
-rw-r--r--  arch/x86/lib/usercopy_32.c | 4
-rw-r--r--  arch/x86/lib/usercopy_64.c | 4
-rw-r--r--  arch/x86/mach-default/setup.c | 12
-rw-r--r--  arch/x86/mach-voyager/setup.c | 12
-rw-r--r--  arch/x86/mach-voyager/voyager_smp.c | 25
-rw-r--r--  arch/x86/mm/fault.c | 7
-rw-r--r--  arch/x86/mm/init_32.c | 48
-rw-r--r--  arch/x86/mm/init_64.c | 2
-rw-r--r--  arch/x86/mm/iomap_32.c | 10
-rw-r--r--  arch/x86/mm/ioremap.c | 25
-rw-r--r--  arch/x86/mm/pageattr.c | 49
-rw-r--r--  arch/x86/mm/pat.c | 6
-rw-r--r--  arch/x86/pci/irq.c | 1
-rw-r--r--  arch/x86/scripts/strip-symbols | 1
-rw-r--r--  arch/x86/xen/multicalls.h | 4
51 files changed, 278 insertions, 160 deletions
diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
index 75115849af33..4a58c8ce3f69 100644
--- a/arch/x86/boot/video-vesa.c
+++ b/arch/x86/boot/video-vesa.c
@@ -269,9 +269,8 @@ void vesa_store_edid(void)
    we genuinely have to assume all registers are destroyed here. */
 
         asm("pushw %%es; movw %2,%%es; "INT10"; popw %%es"
-            : "+a" (ax), "+b" (bx)
-            : "c" (cx), "D" (di)
-            : "esi");
+            : "+a" (ax), "+b" (bx), "+c" (cx), "+D" (di)
+            : : "esi", "edx");
 
         if (ax != 0x004f)
                 return;                 /* No EDID */
@@ -285,9 +284,9 @@ void vesa_store_edid(void)
         dx = 0;                 /* EDID block number */
         di =(size_t) &boot_params.edid_info; /* (ES:)Pointer to block */
         asm(INT10
-            : "+a" (ax), "+b" (bx), "+d" (dx), "=m" (boot_params.edid_info)
-            : "c" (cx), "D" (di)
-            : "esi");
+            : "+a" (ax), "+b" (bx), "+d" (dx), "=m" (boot_params.edid_info),
+              "+c" (cx), "+D" (di)
+            : : "esi");
 #endif /* CONFIG_FIRMWARE_EDID */
 }
 
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index b30a08ed8eb4..edba00d98ac3 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -1331,8 +1331,8 @@ CONFIG_I2C_I801=y
 # Miscellaneous I2C Chip support
 #
 # CONFIG_DS1682 is not set
-# CONFIG_AT24 is not set
-# CONFIG_SENSORS_EEPROM is not set
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_LEGACY is not set
 # CONFIG_SENSORS_PCF8574 is not set
 # CONFIG_PCF8575 is not set
 # CONFIG_SENSORS_PCA9539 is not set
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index 0e7dbc0a3e46..322dd2748fc9 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -1311,8 +1311,8 @@ CONFIG_I2C_I801=y
 # Miscellaneous I2C Chip support
 #
 # CONFIG_DS1682 is not set
-# CONFIG_AT24 is not set
-# CONFIG_SENSORS_EEPROM is not set
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_LEGACY is not set
 # CONFIG_SENSORS_PCF8574 is not set
 # CONFIG_PCF8575 is not set
 # CONFIG_SENSORS_PCA9539 is not set
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 01e7c4c5c7fe..e4baa06bbceb 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -418,9 +418,9 @@ ENTRY(ia32_syscall)
         orl $TS_COMPAT,TI_status(%r10)
         testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
         jnz ia32_tracesys
-ia32_do_syscall:
         cmpl $(IA32_NR_syscalls-1),%eax
-        ja int_ret_from_sys_call        /* ia32_tracesys has set RAX(%rsp) */
+        ja ia32_badsys
+ia32_do_call:
         IA32_ARG_FIXUP
         call *ia32_sys_call_table(,%rax,8) # xxx: rip relative
 ia32_sysret:
@@ -435,7 +435,9 @@ ia32_tracesys:
         call syscall_trace_enter
         LOAD_ARGS32 ARGOFFSET  /* reload args from stack in case ptrace changed it */
         RESTORE_REST
-        jmp ia32_do_syscall
+        cmpl $(IA32_NR_syscalls-1),%eax
+        ja int_ret_from_sys_call        /* ia32_tracesys has set RAX(%rsp) */
+        jmp ia32_do_call
 END(ia32_syscall)
 
 ia32_badsys:
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 4035357f5b9d..132a134d12f2 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -2,8 +2,8 @@
 #define _ASM_X86_DMA_MAPPING_H
 
 /*
- * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
- * documentation.
+ * IOMMU interface. See Documentation/PCI/PCI-DMA-mapping.txt and
+ * Documentation/DMA-API.txt for documentation.
  */
 
 #include <linux/scatterlist.h>
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index 3d8ceddbd407..00d41ce4c844 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -49,6 +49,7 @@
 #define E820_RESERVED_KERN        128
 
 #ifndef __ASSEMBLY__
+#include <linux/types.h>
 struct e820entry {
         __u64 addr;     /* start of memory segment */
         __u64 size;     /* size of memory segment */
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 05cfed4485fa..1dbbdf4be9b4 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -99,7 +99,6 @@ extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size);
  * A boot-time mapping is currently limited to at most 16 pages.
  */
 extern void early_ioremap_init(void);
-extern void early_ioremap_clear(void);
 extern void early_ioremap_reset(void);
 extern void __iomem *early_ioremap(unsigned long offset, unsigned long size);
 extern void __iomem *early_memremap(unsigned long offset, unsigned long size);
diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h
index b95162af0bf6..d2e3bf3608af 100644
--- a/arch/x86/include/asm/kvm.h
+++ b/arch/x86/include/asm/kvm.h
@@ -6,7 +6,7 @@
  *
  */
 
-#include <asm/types.h>
+#include <linux/types.h>
 #include <linux/ioctl.h>
 
 /* Architectural interrupt line count. */
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 1d6e17c2f23a..32c6e17b960b 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -3,8 +3,8 @@
 
 #ifdef __x86_64__
 
+#include <linux/types.h>
 #include <asm/ioctls.h>
-#include <asm/types.h>
 
 /*
  * Machine Check support for x86
@@ -115,8 +115,6 @@ extern int mce_notify_user(void);
 
 #endif /* !CONFIG_X86_32 */
 
-
-
 #ifdef CONFIG_X86_MCE
 extern void mcheck_init(struct cpuinfo_x86 *c);
 #else
@@ -126,5 +124,4 @@ extern void stop_mce(void);
 extern void restart_mce(void);
 
 #endif /* __KERNEL__ */
-
 #endif /* _ASM_X86_MCE_H */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index cb58643947b9..358acc59ae04 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -202,6 +202,35 @@
 #define MSR_IA32_THERM_STATUS           0x0000019c
 #define MSR_IA32_MISC_ENABLE            0x000001a0
 
+/* MISC_ENABLE bits: architectural */
+#define MSR_IA32_MISC_ENABLE_FAST_STRING        (1ULL << 0)
+#define MSR_IA32_MISC_ENABLE_TCC                (1ULL << 1)
+#define MSR_IA32_MISC_ENABLE_EMON               (1ULL << 7)
+#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL        (1ULL << 11)
+#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL       (1ULL << 12)
+#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP (1ULL << 16)
+#define MSR_IA32_MISC_ENABLE_MWAIT              (1ULL << 18)
+#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID        (1ULL << 22)
+#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE       (1ULL << 23)
+#define MSR_IA32_MISC_ENABLE_XD_DISABLE         (1ULL << 34)
+
+/* MISC_ENABLE bits: model-specific, meaning may vary from core to core */
+#define MSR_IA32_MISC_ENABLE_X87_COMPAT         (1ULL << 2)
+#define MSR_IA32_MISC_ENABLE_TM1                (1ULL << 3)
+#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE (1ULL << 4)
+#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE    (1ULL << 6)
+#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK      (1ULL << 8)
+#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE   (1ULL << 9)
+#define MSR_IA32_MISC_ENABLE_FERR               (1ULL << 10)
+#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX     (1ULL << 10)
+#define MSR_IA32_MISC_ENABLE_TM2                (1ULL << 13)
+#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE   (1ULL << 19)
+#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK     (1ULL << 20)
+#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT        (1ULL << 24)
+#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE   (1ULL << 37)
+#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE      (1ULL << 38)
+#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE    (1ULL << 39)
+
 /* Intel Model 6 */
 #define MSR_P6_EVNTSEL0                 0x00000186
 #define MSR_P6_EVNTSEL1                 0x00000187
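
These MISC_ENABLE masks are plain bit tests against the 64-bit IA32_MISC_ENABLE MSR. A minimal usage sketch, mirroring the early_init_intel() hunk further down in this merge (the kernel's rdmsrl()/wrmsrl() helpers are assumed):

        u64 misc_enable;

        /* Clear the "limit CPUID maxval" bit if firmware left it set. */
        rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
        if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
                misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
                wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
        }
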
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index 14080d22edb3..a51ada8467de 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -23,6 +23,7 @@
 #ifndef _ASM_X86_MTRR_H
 #define _ASM_X86_MTRR_H
 
+#include <linux/types.h>
 #include <linux/ioctl.h>
 #include <linux/errno.h>
 
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index cb7c151a8bff..dd14c54ac718 100644
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -42,6 +42,7 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 
 static inline void pte_free(struct mm_struct *mm, struct page *pte)
 {
+        pgtable_page_dtor(pte);
         __free_page(pte);
 }
 
diff --git a/arch/x86/include/asm/ptrace-abi.h b/arch/x86/include/asm/ptrace-abi.h
index 25f1bb8fc626..8e0f8d199e05 100644
--- a/arch/x86/include/asm/ptrace-abi.h
+++ b/arch/x86/include/asm/ptrace-abi.h
@@ -83,7 +83,7 @@
 #ifdef CONFIG_X86_PTRACE_BTS
 
 #ifndef __ASSEMBLY__
-#include <asm/types.h>
+#include <linux/types.h>
 
 /* configuration/status structure used in PTRACE_BTS_CONFIG and
    PTRACE_BTS_STATUS commands.
diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h
index 0afcb5e58acc..ec666491aaa4 100644
--- a/arch/x86/include/asm/sigcontext.h
+++ b/arch/x86/include/asm/sigcontext.h
@@ -2,7 +2,7 @@
 #define _ASM_X86_SIGCONTEXT_H
 
 #include <linux/compiler.h>
-#include <asm/types.h>
+#include <linux/types.h>
 
 #define FP_XSTATE_MAGIC1        0x46505853U
 #define FP_XSTATE_MAGIC2        0x46505845U
diff --git a/arch/x86/include/asm/sigcontext32.h b/arch/x86/include/asm/sigcontext32.h
index 6126188cf3a9..ad1478c4ae12 100644
--- a/arch/x86/include/asm/sigcontext32.h
+++ b/arch/x86/include/asm/sigcontext32.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_SIGCONTEXT32_H
 #define _ASM_X86_SIGCONTEXT32_H
 
+#include <linux/types.h>
+
 /* signal context for 32bit programs. */
 
 #define X86_FXSR_MAGIC          0x0000
diff --git a/arch/x86/include/asm/swab.h b/arch/x86/include/asm/swab.h
index 306d4178ffc9..557cd9f00661 100644
--- a/arch/x86/include/asm/swab.h
+++ b/arch/x86/include/asm/swab.h
@@ -1,7 +1,7 @@
 #ifndef _ASM_X86_SWAB_H
 #define _ASM_X86_SWAB_H
 
-#include <asm/types.h>
+#include <linux/types.h>
 #include <linux/compiler.h>
 
 static inline __attribute_const__ __u32 __arch_swab32(__u32 val)
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
index 9c6797c3e56c..c0b0bda754ee 100644
--- a/arch/x86/include/asm/syscalls.h
+++ b/arch/x86/include/asm/syscalls.h
@@ -40,7 +40,7 @@ asmlinkage int sys_sigaction(int, const struct old_sigaction __user *,
                              struct old_sigaction __user *);
 asmlinkage int sys_sigaltstack(unsigned long);
 asmlinkage unsigned long sys_sigreturn(unsigned long);
-asmlinkage int sys_rt_sigreturn(struct pt_regs);
+asmlinkage int sys_rt_sigreturn(unsigned long);
 
 /* kernel/ioport.c */
 asmlinkage long sys_iopl(unsigned long);
diff --git a/arch/x86/include/asm/timex.h b/arch/x86/include/asm/timex.h
index 1287dc1347d6..b5c9d45c981f 100644
--- a/arch/x86/include/asm/timex.h
+++ b/arch/x86/include/asm/timex.h
@@ -1,18 +1,13 @@
-/* x86 architecture timex specifications */
 #ifndef _ASM_X86_TIMEX_H
 #define _ASM_X86_TIMEX_H
 
 #include <asm/processor.h>
 #include <asm/tsc.h>
 
-#ifdef CONFIG_X86_ELAN
-# define PIT_TICK_RATE 1189200 /* AMD Elan has different frequency! */
-#elif defined(CONFIG_X86_RDC321X)
-# define PIT_TICK_RATE 1041667 /* Underlying HZ for R8610 */
-#else
-# define PIT_TICK_RATE 1193182 /* Underlying HZ */
-#endif
-#define CLOCK_TICK_RATE PIT_TICK_RATE
+/* The PIT ticks at this frequency (in HZ): */
+#define PIT_TICK_RATE           1193182
+
+#define CLOCK_TICK_RATE         PIT_TICK_RATE
 
 #define ARCH_HAS_READ_CURRENT_TIMER
 
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 4abff454c55b..7c243a2c5115 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -157,11 +157,11 @@ static int __init acpi_sleep_setup(char *str)
 #ifdef CONFIG_HIBERNATION
                 if (strncmp(str, "s4_nohwsig", 10) == 0)
                         acpi_no_s4_hw_signature();
+                if (strncmp(str, "s4_nonvs", 8) == 0)
+                        acpi_s4_no_nvs();
 #endif
                 if (strncmp(str, "old_ordering", 12) == 0)
                         acpi_old_suspend_ordering();
-                if (strncmp(str, "s4_nonvs", 8) == 0)
-                        acpi_s4_no_nvs();
                 str = strchr(str, ',');
                 if (str != NULL)
                         str += strspn(str, ", \t");
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c
index 849c23009bf5..abfa0b641aea 100644
--- a/arch/x86/kernel/apic.c
+++ b/arch/x86/kernel/apic.c
@@ -1447,7 +1447,7 @@ static int __init detect_init_APIC(void)
         switch (boot_cpu_data.x86_vendor) {
         case X86_VENDOR_AMD:
                 if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) ||
-                    (boot_cpu_data.x86 == 15))
+                    (boot_cpu_data.x86 >= 15))
                         break;
                 goto no_apic;
         case X86_VENDOR_INTEL:
diff --git a/arch/x86/kernel/cpu/cpufreq/Kconfig b/arch/x86/kernel/cpu/cpufreq/Kconfig
index efae3b22a0ff..65792c2cc462 100644
--- a/arch/x86/kernel/cpu/cpufreq/Kconfig
+++ b/arch/x86/kernel/cpu/cpufreq/Kconfig
@@ -245,17 +245,6 @@ config X86_E_POWERSAVER
 
 comment "shared options"
 
-config X86_ACPI_CPUFREQ_PROC_INTF
-        bool "/proc/acpi/processor/../performance interface (deprecated)"
-        depends on PROC_FS
-        depends on X86_ACPI_CPUFREQ || X86_POWERNOW_K7_ACPI || X86_POWERNOW_K8_ACPI
-        help
-          This enables the deprecated /proc/acpi/processor/../performance
-          interface. While it is helpful for debugging, the generic,
-          cross-architecture cpufreq interfaces should be used.
-
-          If in doubt, say N.
-
 config X86_SPEEDSTEP_LIB
         tristate
         default (X86_SPEEDSTEP_ICH || X86_SPEEDSTEP_SMI || X86_P4_CLOCKMOD)
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 8ea6929e974c..430e5c38a544 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -29,6 +29,19 @@
 
 static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 {
+        /* Unmask CPUID levels if masked: */
+        if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
+                u64 misc_enable;
+
+                rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
+
+                if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
+                        misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
+                        wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
+                        c->cpuid_level = cpuid_eax(0);
+                }
+        }
+
         if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
             (c->x86 == 0x6 && c->x86_model >= 0x0e))
                 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 58527a9fc404..7293508d8f5c 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -36,8 +36,11 @@ static struct _cache_table cache_table[] __cpuinitdata =
 {
         { 0x06, LVL_1_INST, 8 },        /* 4-way set assoc, 32 byte line size */
         { 0x08, LVL_1_INST, 16 },       /* 4-way set assoc, 32 byte line size */
+        { 0x09, LVL_1_INST, 32 },       /* 4-way set assoc, 64 byte line size */
         { 0x0a, LVL_1_DATA, 8 },        /* 2 way set assoc, 32 byte line size */
         { 0x0c, LVL_1_DATA, 16 },       /* 4-way set assoc, 32 byte line size */
+        { 0x0d, LVL_1_DATA, 16 },       /* 4-way set assoc, 64 byte line size */
+        { 0x21, LVL_2, 256 },           /* 8-way set assoc, 64 byte line size */
         { 0x22, LVL_3, 512 },           /* 4-way set assoc, sectored cache, 64 byte line size */
         { 0x23, LVL_3, 1024 },          /* 8-way set assoc, sectored cache, 64 byte line size */
         { 0x25, LVL_3, 2048 },          /* 8-way set assoc, sectored cache, 64 byte line size */
@@ -85,6 +88,18 @@ static struct _cache_table cache_table[] __cpuinitdata =
         { 0x85, LVL_2, 2048 },          /* 8-way set assoc, 32 byte line size */
         { 0x86, LVL_2, 512 },           /* 4-way set assoc, 64 byte line size */
         { 0x87, LVL_2, 1024 },          /* 8-way set assoc, 64 byte line size */
+        { 0xd0, LVL_3, 512 },           /* 4-way set assoc, 64 byte line size */
+        { 0xd1, LVL_3, 1024 },          /* 4-way set assoc, 64 byte line size */
+        { 0xd2, LVL_3, 2048 },          /* 4-way set assoc, 64 byte line size */
+        { 0xd6, LVL_3, 1024 },          /* 8-way set assoc, 64 byte line size */
+        { 0xd7, LVL_3, 2038 },          /* 8-way set assoc, 64 byte line size */
+        { 0xd8, LVL_3, 4096 },          /* 12-way set assoc, 64 byte line size */
+        { 0xdc, LVL_3, 2048 },          /* 12-way set assoc, 64 byte line size */
+        { 0xdd, LVL_3, 4096 },          /* 12-way set assoc, 64 byte line size */
+        { 0xde, LVL_3, 8192 },          /* 12-way set assoc, 64 byte line size */
+        { 0xe2, LVL_3, 2048 },          /* 16-way set assoc, 64 byte line size */
+        { 0xe3, LVL_3, 4096 },          /* 16-way set assoc, 64 byte line size */
+        { 0xe4, LVL_3, 8192 },          /* 16-way set assoc, 64 byte line size */
         { 0x00, 0, 0}
 };
 
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index b59ddcc88cd8..0c0a455fe95c 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -33,11 +33,13 @@ u64 mtrr_tom2;
 struct mtrr_state_type mtrr_state = {};
 EXPORT_SYMBOL_GPL(mtrr_state);
 
-#undef MODULE_PARAM_PREFIX
-#define MODULE_PARAM_PREFIX "mtrr."
-
-static int mtrr_show;
-module_param_named(show, mtrr_show, bool, 0);
+static int __initdata mtrr_show;
+static int __init mtrr_debug(char *opt)
+{
+        mtrr_show = 1;
+        return 0;
+}
+early_param("mtrr.show", mtrr_debug);
 
 /*
  * Returns the effective MTRR type for the region
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index d259e5d2e054..236a401b8259 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -1594,8 +1594,7 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
 
         /* kvm/qemu doesn't have mtrr set right, don't trim them all */
         if (!highest_pfn) {
-                WARN(!kvm_para_available(), KERN_WARNING
-                     "WARNING: strange, CPU MTRRs all blank?\n");
+                printk(KERN_INFO "CPU MTRRs all blank - virtualized system.\n");
                 return 0;
         }
 
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
index da91701a2348..169a120587be 100644
--- a/arch/x86/kernel/ds.c
+++ b/arch/x86/kernel/ds.c
@@ -15,8 +15,8 @@
  *  - buffer allocation (memory accounting)
  *
  *
- * Copyright (C) 2007-2008 Intel Corporation.
- * Markus Metzger <markus.t.metzger@intel.com>, 2007-2008
+ * Copyright (C) 2007-2009 Intel Corporation.
+ * Markus Metzger <markus.t.metzger@intel.com>, 2007-2009
  */
 
 
@@ -890,7 +890,7 @@ int ds_set_pebs_reset(struct pebs_tracer *tracer, u64 value)
 }
 
 static const struct ds_configuration ds_cfg_netburst = {
-        .name = "netburst",
+        .name = "Netburst",
         .ctl[dsf_bts]           = (1 << 2) | (1 << 3),
         .ctl[dsf_bts_kernel]    = (1 << 5),
         .ctl[dsf_bts_user]      = (1 << 6),
@@ -904,7 +904,7 @@ static const struct ds_configuration ds_cfg_netburst = {
 #endif
 };
 static const struct ds_configuration ds_cfg_pentium_m = {
-        .name = "pentium m",
+        .name = "Pentium M",
         .ctl[dsf_bts]           = (1 << 6) | (1 << 7),
 
         .sizeof_field           = sizeof(long),
@@ -915,8 +915,8 @@ static const struct ds_configuration ds_cfg_pentium_m = {
         .sizeof_rec[ds_pebs]    = sizeof(long) * 18,
 #endif
 };
-static const struct ds_configuration ds_cfg_core2 = {
-        .name = "core 2",
+static const struct ds_configuration ds_cfg_core2_atom = {
+        .name = "Core 2/Atom",
         .ctl[dsf_bts]           = (1 << 6) | (1 << 7),
         .ctl[dsf_bts_kernel]    = (1 << 9),
         .ctl[dsf_bts_user]      = (1 << 10),
@@ -949,19 +949,22 @@ void __cpuinit ds_init_intel(struct cpuinfo_x86 *c)
         switch (c->x86) {
         case 0x6:
                 switch (c->x86_model) {
-                case 0 ... 0xC:
-                        /* sorry, don't know about them */
-                        break;
-                case 0xD:
-                case 0xE: /* Pentium M */
+                case 0x9:
+                case 0xd: /* Pentium M */
                         ds_configure(&ds_cfg_pentium_m);
                         break;
-                default: /* Core2, Atom, ... */
-                        ds_configure(&ds_cfg_core2);
+                case 0xf:
+                case 0x17: /* Core2 */
+                case 0x1c: /* Atom */
+                        ds_configure(&ds_cfg_core2_atom);
+                        break;
+                case 0x1a: /* i7 */
+                default:
+                        /* sorry, don't know about them */
                         break;
                 }
                 break;
-        case 0xF:
+        case 0xf:
                 switch (c->x86_model) {
                 case 0x0:
                 case 0x1:
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index eb0a0703f4c9..8f8f61a1fce8 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -347,6 +347,7 @@ ENTRY(save_args)
         popq_cfi %rax                   /* move return address... */
         mov PER_CPU_VAR(irq_stack_ptr),%rsp
         EMPTY_FRAME 0
+        pushq_cfi %rbp                  /* backlink for unwinder */
         pushq_cfi %rax                  /* ... to the new stack */
         /*
          * We entered an interrupt context - irqs are off:
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index cd759ad90690..64d5ad0b8add 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -628,11 +628,12 @@ static int hpet_cpuhp_notify(struct notifier_block *n,
 
         switch (action & 0xf) {
         case CPU_ONLINE:
-                INIT_DELAYED_WORK(&work.work, hpet_work);
+                INIT_DELAYED_WORK_ON_STACK(&work.work, hpet_work);
                 init_completion(&work.complete);
                 /* FIXME: add schedule_work_on() */
                 schedule_delayed_work_on(cpu, &work.work, 0);
                 wait_for_completion(&work.complete);
+                destroy_timer_on_stack(&work.work.timer);
                 break;
         case CPU_DEAD:
                 if (hdev) {
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index e4d36bd56b62..0a7f6d6b1206 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -2527,14 +2527,15 @@ static void irq_complete_move(struct irq_desc **descp)
 
         vector = ~get_irq_regs()->orig_ax;
         me = smp_processor_id();
+
+        if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) {
 #ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
                 *descp = desc = move_irq_desc(desc, me);
                 /* get the new one */
                 cfg = desc->chip_data;
 #endif
-
-        if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
                 send_cleanup_vector(cfg);
+        }
 }
 #else
 static inline void irq_complete_move(struct irq_desc **descp) {}
diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c
index 520e6c1c5d22..f6ff71cdaba8 100644
--- a/arch/x86/kernel/irqinit_32.c
+++ b/arch/x86/kernel/irqinit_32.c
@@ -78,15 +78,6 @@ void __init init_ISA_irqs(void)
         }
 }
 
-/*
- * IRQ2 is cascade interrupt to second interrupt controller
- */
-static struct irqaction irq2 = {
-        .handler = no_action,
-        .mask = CPU_MASK_NONE,
-        .name = "cascade",
-};
-
 DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
         [0 ... IRQ0_VECTOR - 1] = -1,
         [IRQ0_VECTOR] = 0,
@@ -198,9 +189,6 @@ void __init native_init_IRQ(void)
                 set_intr_gate(vector, interrupt[i]);
         }
 
-        if (!acpi_ioapic)
-                setup_irq(2, &irq2);
-
         /* setup after call gates are initialised (usually add in
          * the architecture specific gates)
          */
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 00c2bcd41463..d5768b1af080 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -5,7 +5,7 @@
  * This allows to use PCI devices that only support 32bit addresses on systems
  * with more than 4GB.
  *
- * See Documentation/DMA-mapping.txt for the interface specification.
+ * See Documentation/PCI/PCI-DMA-mapping.txt for the interface specification.
  *
  * Copyright 2002 Andi Kleen, SuSE Labs.
  * Subject to the GNU General Public License v2 only.
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 90b8e154bb53..e553803cd2db 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -80,7 +80,7 @@ static inline void setup_node_to_cpumask_map(void) { }
 #ifdef CONFIG_X86_64
 
 /* correctly size the local cpu masks */
-static void setup_cpu_local_masks(void)
+static void __init setup_cpu_local_masks(void)
 {
         alloc_bootmem_cpumask_var(&cpu_initialized_mask);
         alloc_bootmem_cpumask_var(&cpu_callin_mask);
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 4fa5243c2069..0bc73d67acfb 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -632,9 +632,16 @@ badframe:
 }
 
 #ifdef CONFIG_X86_32
-asmlinkage int sys_rt_sigreturn(struct pt_regs regs)
+/*
+ * Note: do not pass in pt_regs directly as with tail-call optimization
+ * GCC will incorrectly stomp on the caller's frame and corrupt user-space
+ * register state:
+ */
+asmlinkage int sys_rt_sigreturn(unsigned long __unused)
 {
-        return do_rt_sigreturn(&regs);
+        struct pt_regs *regs = (struct pt_regs *)&__unused;
+
+        return do_rt_sigreturn(regs);
 }
 #else /* !CONFIG_X86_32 */
 asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index aae15dd72604..89fce1b6d01f 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -201,6 +201,7 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
                                 destination_timeouts = 0;
                         }
                 }
+                cpu_relax();
         }
         return FLUSH_COMPLETE;
 }
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index 23206ba16874..1d3302cc2ddf 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -858,7 +858,7 @@ void __init vmi_init(void)
 #endif
 }
 
-void vmi_activate(void)
+void __init vmi_activate(void)
 {
         unsigned long flags;
 
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index a7ed208f81e3..92f1c6f3e19d 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -931,7 +931,7 @@ static void lguest_restart(char *reason)
  * that we can fit comfortably.
  *
  * First we need assembly templates of each of the patchable Guest operations,
- * and these are in lguest_asm.S. */
+ * and these are in i386_head.S. */
 
 /*G:060 We construct a table from the assembler templates: */
 static const struct lguest_insns
@@ -1093,7 +1093,7 @@ __init void lguest_init(void)
         acpi_ht = 0;
 #endif
 
-        /* We set the perferred console to "hvc". This is the "hypervisor
+        /* We set the preferred console to "hvc". This is the "hypervisor
          * virtual console" driver written by the PowerPC people, which we also
          * adapted for lguest's use. */
         add_preferred_console("hvc", 0, NULL);
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 4a20b2f9a381..7c8ca91bb9ec 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -56,7 +56,7 @@ do { \
                 "       jmp 2b\n"                                          \
                 ".previous\n"                                              \
                 _ASM_EXTABLE(0b,3b)                                        \
-                : "=d"(res), "=c"(count), "=&a" (__d0), "=&S" (__d1),      \
+                : "=&d"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1),    \
                   "=&D" (__d2)                                             \
                 : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
                 : "memory");                                               \
@@ -218,7 +218,7 @@ long strnlen_user(const char __user *s, long n)
                 "       .align 4\n"
                 "       .long 0b,2b\n"
                 ".previous"
-                :"=r" (n), "=D" (s), "=a" (res), "=c" (tmp)
+                :"=&r" (n), "=&D" (s), "=&a" (res), "=&c" (tmp)
                 :"0" (n), "1" (s), "2" (0), "3" (mask)
                 :"cc");
         return res & mask;
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index 64d6c84e6353..ec13cb5f17ed 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -32,7 +32,7 @@ do { \
                 "       jmp 2b\n"                                          \
                 ".previous\n"                                              \
                 _ASM_EXTABLE(0b,3b)                                        \
-                : "=r"(res), "=c"(count), "=&a" (__d0), "=&S" (__d1),      \
+                : "=&r"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1),    \
                   "=&D" (__d2)                                             \
                 : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
                 : "memory");                                               \
@@ -86,7 +86,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
                 ".previous\n"
                 _ASM_EXTABLE(0b,3b)
                 _ASM_EXTABLE(1b,2b)
-                : [size8] "=c"(size), [dst] "=&D" (__d0)
+                : [size8] "=&c"(size), [dst] "=&D" (__d0)
                 : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
                   [zero] "r" (0UL), [eight] "r" (8UL));
         return size;
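
Both usercopy hunks apply the same fix: an output operand that the asm writes before it has finished consuming its inputs must be marked earlyclobber ("=&"), otherwise GCC may assign it the same register as an input. A minimal standalone illustration of the constraint (not kernel code; a hypothetical helper):

        /* 'out' is zeroed before 'in' is read; without the '&' earlyclobber
         * GCC could place both operands in one register and destroy the
         * input before the add. */
        static inline unsigned long copy_via_zero(unsigned long in)
        {
                unsigned long out;

                asm("xor %0, %0\n\t"
                    "add %1, %0"
                    : "=&r" (out)
                    : "r" (in));
                return out;
        }
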
diff --git a/arch/x86/mach-default/setup.c b/arch/x86/mach-default/setup.c
index df167f265622..a265a7c63190 100644
--- a/arch/x86/mach-default/setup.c
+++ b/arch/x86/mach-default/setup.c
@@ -38,6 +38,15 @@ void __init pre_intr_init_hook(void)
         init_ISA_irqs();
 }
 
+/*
+ * IRQ2 is cascade interrupt to second interrupt controller
+ */
+static struct irqaction irq2 = {
+        .handler = no_action,
+        .mask = CPU_MASK_NONE,
+        .name = "cascade",
+};
+
 /**
  * intr_init_hook - post gate setup interrupt initialisation
  *
@@ -53,6 +62,9 @@ void __init intr_init_hook(void)
                 if (x86_quirks->arch_intr_init())
                         return;
         }
+        if (!acpi_ioapic)
+                setup_irq(2, &irq2);
+
 }
 
 /**
diff --git a/arch/x86/mach-voyager/setup.c b/arch/x86/mach-voyager/setup.c
index 0ade62555ff3..66b7eb57d8e4 100644
--- a/arch/x86/mach-voyager/setup.c
+++ b/arch/x86/mach-voyager/setup.c
@@ -34,13 +34,23 @@ void __init intr_init_hook(void)
         setup_irq(2, &irq2);
 }
 
-void __init pre_setup_arch_hook(void)
+static void voyager_disable_tsc(void)
 {
         /* Voyagers run their CPUs from independent clocks, so disable
          * the TSC code because we can't sync them */
         setup_clear_cpu_cap(X86_FEATURE_TSC);
 }
 
+void __init pre_setup_arch_hook(void)
+{
+        voyager_disable_tsc();
+}
+
+void __init pre_time_init_hook(void)
+{
+        voyager_disable_tsc();
+}
+
 void __init trap_init_hook(void)
 {
 }
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 96f15b09a4c5..2c74aec4efc1 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -81,7 +81,7 @@ static void enable_local_vic_irq(unsigned int irq);
 static void disable_local_vic_irq(unsigned int irq);
 static void before_handle_vic_irq(unsigned int irq);
 static void after_handle_vic_irq(unsigned int irq);
-static void set_vic_irq_affinity(unsigned int irq, cpumask_t mask);
+static void set_vic_irq_affinity(unsigned int irq, const struct cpumask *mask);
 static void ack_vic_irq(unsigned int irq);
 static void vic_enable_cpi(void);
 static void do_boot_cpu(__u8 cpuid);
@@ -211,8 +211,6 @@ static __u32 cpu_booted_map;
 static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
 
 /* This is for the new dynamic CPU boot code */
-cpumask_t cpu_callin_map = CPU_MASK_NONE;
-cpumask_t cpu_callout_map = CPU_MASK_NONE;
 
 /* The per processor IRQ masks (these are usually kept in sync) */
 static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;
@@ -378,7 +376,7 @@ void __init find_smp_config(void)
         cpus_addr(phys_cpu_present_map)[0] |=
             voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK +
                                        3) << 24;
-        cpu_possible_map = phys_cpu_present_map;
+        init_cpu_possible(&phys_cpu_present_map);
         printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n",
                cpus_addr(phys_cpu_present_map)[0]);
         /* Here we set up the VIC to enable SMP */
@@ -1600,16 +1598,16 @@ static void after_handle_vic_irq(unsigned int irq)
  * change the mask and then do an interrupt enable CPI to re-enable on
  * the selected processors */
 
-void set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
+void set_vic_irq_affinity(unsigned int irq, const struct cpumask *mask)
 {
         /* Only extended processors handle interrupts */
         unsigned long real_mask;
         unsigned long irq_mask = 1 << irq;
         int cpu;
 
-        real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors;
+        real_mask = cpus_addr(*mask)[0] & voyager_extended_vic_processors;
 
-        if (cpus_addr(mask)[0] == 0)
+        if (cpus_addr(*mask)[0] == 0)
                 /* can't have no CPUs to accept the interrupt -- extremely
                  * bad things will happen */
                 return;
@@ -1752,10 +1750,11 @@ static void __cpuinit voyager_smp_prepare_boot_cpu(void)
         per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
         switch_to_new_gdt();
 
-        cpu_set(smp_processor_id(), cpu_online_map);
-        cpu_set(smp_processor_id(), cpu_callout_map);
-        cpu_set(smp_processor_id(), cpu_possible_map);
-        cpu_set(smp_processor_id(), cpu_present_map);
+        cpu_online_map = cpumask_of_cpu(smp_processor_id());
+        cpu_callout_map = cpumask_of_cpu(smp_processor_id());
+        cpu_callin_map = CPU_MASK_NONE;
+        cpu_present_map = cpumask_of_cpu(smp_processor_id());
+
 }
 
 static int __cpuinit voyager_cpu_up(unsigned int cpu)
@@ -1785,9 +1784,9 @@ void __init smp_setup_processor_id(void)
         percpu_write(cpu_number, hard_smp_processor_id());
 }
 
-static void voyager_send_call_func(cpumask_t callmask)
+static void voyager_send_call_func(const struct cpumask *callmask)
 {
-        __u32 mask = cpus_addr(callmask)[0] & ~(1 << smp_processor_id());
+        __u32 mask = cpus_addr(*callmask)[0] & ~(1 << smp_processor_id());
         send_CPI(mask, VIC_CALL_FUNCTION_CPI);
 }
 
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 65709a6aa6ee..8c3f3113a6ec 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -807,8 +807,6 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
         /* get the address */
         address = read_cr2();
 
-        if (unlikely(notify_page_fault(regs)))
-                return;
         if (unlikely(kmmio_fault(regs, address)))
                 return;
 
@@ -838,6 +836,9 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
                 if (spurious_fault(error_code, address))
                         return;
 
+                /* kprobes don't want to hook the spurious faults. */
+                if (notify_page_fault(regs))
+                        return;
                 /*
                  * Don't take the mm semaphore here. If we fixup a prefetch
                  * fault we could otherwise deadlock.
@@ -846,6 +847,8 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
                         return;
         }
 
+        if (unlikely(notify_page_fault(regs)))
+                return;
         /*
          * It's safe to allow irq's after cr2 has been saved and the
          * vmalloc fault has been handled.
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 4a6989e47a53..00263bf07a88 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -137,6 +137,47 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
         return pte_offset_kernel(pmd, 0);
 }
 
+static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
+                                           unsigned long vaddr, pte_t *lastpte)
+{
+#ifdef CONFIG_HIGHMEM
+        /*
+         * Something (early fixmap) may already have put a pte
+         * page here, which causes the page table allocation
+         * to become nonlinear. Attempt to fix it, and if it
+         * is still nonlinear then we have to bug.
+         */
+        int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
+        int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;
+
+        if (pmd_idx_kmap_begin != pmd_idx_kmap_end
+            && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
+            && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end
+            && ((__pa(pte) >> PAGE_SHIFT) < table_start
+                || (__pa(pte) >> PAGE_SHIFT) >= table_end)) {
+                pte_t *newpte;
+                int i;
+
+                BUG_ON(after_init_bootmem);
+                newpte = alloc_low_page();
+                for (i = 0; i < PTRS_PER_PTE; i++)
+                        set_pte(newpte + i, pte[i]);
+
+                paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
+                set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
+                BUG_ON(newpte != pte_offset_kernel(pmd, 0));
+                __flush_tlb_all();
+
+                paravirt_release_pte(__pa(pte) >> PAGE_SHIFT);
+                pte = newpte;
+        }
+        BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
+               && vaddr > fix_to_virt(FIX_KMAP_END)
+               && lastpte && lastpte + PTRS_PER_PTE != pte);
+#endif
+        return pte;
+}
+
 /*
  * This function initializes a certain range of kernel virtual memory
  * with new bootmem page tables, everywhere page tables are missing in
@@ -153,6 +194,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
         unsigned long vaddr;
         pgd_t *pgd;
         pmd_t *pmd;
+        pte_t *pte = NULL;
 
         vaddr = start;
         pgd_idx = pgd_index(vaddr);
@@ -164,7 +206,8 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
                 pmd = pmd + pmd_index(vaddr);
                 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
                                                         pmd++, pmd_idx++) {
-                        one_page_table_init(pmd);
+                        pte = page_table_kmap_check(one_page_table_init(pmd),
+                                                    pmd, vaddr, pte);
 
                         vaddr += PMD_SIZE;
                 }
@@ -507,7 +550,6 @@ static void __init early_ioremap_page_table_range_init(pgd_t *pgd_base)
          * Fixed mappings, only the page table structure has to be
          * created - mappings will be set by set_fixmap():
          */
-        early_ioremap_clear();
         vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
         end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
         page_table_range_init(vaddr, end, pgd_base);
@@ -800,7 +842,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse)
                 tables += PAGE_ALIGN(ptes * sizeof(pte_t));
 
         /* for fixmap */
-        tables += PAGE_SIZE * 2;
+        tables += PAGE_ALIGN(__end_of_fixed_addresses * sizeof(pte_t));
 
         /*
          * RED-PEN putting page tables only on node 0 could
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 23f68e77ad1f..e6d36b490250 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -596,7 +596,7 @@ static void __init init_gbpages(void)
         direct_gbpages = 0;
 }
 
-static unsigned long __init kernel_physical_mapping_init(unsigned long start,
+static unsigned long __meminit kernel_physical_mapping_init(unsigned long start,
                                                 unsigned long end,
                                                 unsigned long page_size_mask)
 {
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index d0151d8ce452..ca53224fc56c 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -17,6 +17,7 @@
  */
 
 #include <asm/iomap.h>
+#include <asm/pat.h>
 #include <linux/module.h>
 
 /* Map 'pfn' using fixed map 'type' and protections 'prot'
@@ -29,6 +30,15 @@ iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 
         pagefault_disable();
 
+        /*
+         * For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS.
+         * PAGE_KERNEL_WC maps to PWT, which translates to uncached if the
+         * MTRR is UC or WC. UC_MINUS gets the real intention, of the
+         * user, which is "WC if the MTRR is WC, UC if you can't do that."
+         */
+        if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
+                prot = PAGE_KERNEL_UC_MINUS;
+
         idx = type + KM_TYPE_NR*smp_processor_id();
         vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
         set_pte(kmap_pte-idx, pfn_pte(pfn, prot));
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index bd85d42819e1..af750ab973b6 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -557,34 +557,9 @@ void __init early_ioremap_init(void)
         }
 }
 
-void __init early_ioremap_clear(void)
-{
-        pmd_t *pmd;
-
-        if (early_ioremap_debug)
-                printk(KERN_INFO "early_ioremap_clear()\n");
-
-        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
-        pmd_clear(pmd);
-        paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
-        __flush_tlb_all();
-}
-
 void __init early_ioremap_reset(void)
 {
-        enum fixed_addresses idx;
-        unsigned long addr, phys;
-        pte_t *pte;
-
         after_paging_init = 1;
-        for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
-                addr = fix_to_virt(idx);
-                pte = early_ioremap_pte(addr);
-                if (pte_present(*pte)) {
-                        phys = pte_val(*pte) & PAGE_MASK;
-                        set_fixmap(idx, phys);
-                }
-        }
 }
 
 static void __init __early_set_fixmap(enum fixed_addresses idx,
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index e89d24815f26..84ba74820ad6 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -534,6 +534,36 @@ out_unlock:
         return 0;
 }
 
+static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
+                               int primary)
+{
+        /*
+         * Ignore all non primary paths.
+         */
+        if (!primary)
+                return 0;
+
+        /*
+         * Ignore the NULL PTE for kernel identity mapping, as it is expected
+         * to have holes.
+         * Also set numpages to '1' indicating that we processed cpa req for
+         * one virtual address page and its pfn. TBD: numpages can be set based
+         * on the initial value and the level returned by lookup_address().
+         */
+        if (within(vaddr, PAGE_OFFSET,
+                   PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {
+                cpa->numpages = 1;
+                cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
+                return 0;
+        } else {
+                WARN(1, KERN_WARNING "CPA: called for zero pte. "
+                        "vaddr = %lx cpa->vaddr = %lx\n", vaddr,
+                        *cpa->vaddr);
+
+                return -EFAULT;
+        }
+}
+
 static int __change_page_attr(struct cpa_data *cpa, int primary)
 {
         unsigned long address;
@@ -549,17 +579,11 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
 repeat:
         kpte = lookup_address(address, &level);
         if (!kpte)
-                return 0;
+                return __cpa_process_fault(cpa, address, primary);
 
         old_pte = *kpte;
-        if (!pte_val(old_pte)) {
-                if (!primary)
-                        return 0;
-                WARN(1, KERN_WARNING "CPA: called for zero pte. "
-                     "vaddr = %lx cpa->vaddr = %lx\n", address,
-                     *cpa->vaddr);
-                return -EINVAL;
-        }
+        if (!pte_val(old_pte))
+                return __cpa_process_fault(cpa, address, primary);
 
         if (level == PG_LEVEL_4K) {
                 pte_t new_pte;
@@ -657,12 +681,7 @@ static int cpa_process_alias(struct cpa_data *cpa)
         vaddr = *cpa->vaddr;
 
         if (!(within(vaddr, PAGE_OFFSET,
-                    PAGE_OFFSET + (max_low_pfn_mapped << PAGE_SHIFT))
-#ifdef CONFIG_X86_64
-            || within(vaddr, PAGE_OFFSET + (1UL<<32),
-                    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))
-#endif
-            )) {
+                    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {
 
                 alias_cpa = *cpa;
                 temp_cpa_vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index c9488513fd70..7b61036427df 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -333,6 +333,9 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
                                               req_type & _PAGE_CACHE_MASK);
         }
 
+        if (new_type)
+                *new_type = actual_type;
+
         /*
          * For legacy reasons, some parts of the physical address range in the
          * legacy 1MB region is treated as non-RAM (even when listed as RAM in
@@ -356,9 +359,6 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
         new->end        = end;
         new->type       = actual_type;
 
-        if (new_type)
-                *new_type = actual_type;
-
         spin_lock(&memtype_lock);
 
         if (cached_entry && start >= cached_start)
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index 4064345cf144..fecbce6e7d7c 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -572,6 +572,7 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route
         case PCI_DEVICE_ID_INTEL_ICH7_1:
         case PCI_DEVICE_ID_INTEL_ICH7_30:
         case PCI_DEVICE_ID_INTEL_ICH7_31:
+        case PCI_DEVICE_ID_INTEL_TGP_LPC:
         case PCI_DEVICE_ID_INTEL_ESB2_0:
         case PCI_DEVICE_ID_INTEL_ICH8_0:
         case PCI_DEVICE_ID_INTEL_ICH8_1:
diff --git a/arch/x86/scripts/strip-symbols b/arch/x86/scripts/strip-symbols
deleted file mode 100644
index a2f1ccb827c7..000000000000
--- a/arch/x86/scripts/strip-symbols
+++ /dev/null
@@ -1 +0,0 @@
-__cpu_vendor_dev_X86_VENDOR_*
diff --git a/arch/x86/xen/multicalls.h b/arch/x86/xen/multicalls.h
index e786fa7f2615..9e565da5d1f7 100644
--- a/arch/x86/xen/multicalls.h
+++ b/arch/x86/xen/multicalls.h
@@ -19,8 +19,10 @@ DECLARE_PER_CPU(unsigned long, xen_mc_irq_flags);
                      paired with xen_mc_issue() */
 static inline void xen_mc_batch(void)
 {
+        unsigned long flags;
         /* need to disable interrupts until this entry is complete */
-        local_irq_save(__get_cpu_var(xen_mc_irq_flags));
+        local_irq_save(flags);
+        __get_cpu_var(xen_mc_irq_flags) = flags;
 }
 
 static inline struct multicall_space xen_mc_entry(size_t args)
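
The xen_mc_batch() change follows the usual rule for per-CPU state: disable interrupts first, then touch the per-CPU slot, so a preemption between the two steps cannot store the flags into another CPU's variable. A hedged sketch of the pattern with a hypothetical per-CPU variable:

        DEFINE_PER_CPU(unsigned long, my_saved_flags);  /* hypothetical */

        static inline void my_enter_atomic(void)
        {
                unsigned long flags;

                /* Interrupts off first, then the per-CPU access. */
                local_irq_save(flags);
                __get_cpu_var(my_saved_flags) = flags;
        }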