author     Ingo Molnar <mingo@elte.hu>  2009-02-09 08:58:11 -0500
committer  Ingo Molnar <mingo@elte.hu>  2009-02-09 08:58:11 -0500
commit     249d51b53aea1b7cdb1be65a1a9a0c59d9e06f3e (patch)
tree       7fc06930e46ee13d394f5b031166c40206af3189 /arch/x86
parent     44581a28e805a31661469c4b466b9cd14b36e7b6 (diff)
parent     8e4921515c1a379539607eb443d51c30f4f7f338 (diff)
Merge commit 'v2.6.29-rc4' into core/percpu

Conflicts:
	arch/x86/mach-voyager/voyager_smp.c
	arch/x86/mm/fault.c
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/boot/video-vesa.c             11
-rw-r--r--  arch/x86/ia32/ia32entry.S               8
-rw-r--r--  arch/x86/include/asm/e820.h             1
-rw-r--r--  arch/x86/include/asm/kvm.h              2
-rw-r--r--  arch/x86/include/asm/mce.h              5
-rw-r--r--  arch/x86/include/asm/mtrr.h             1
-rw-r--r--  arch/x86/include/asm/ptrace-abi.h       2
-rw-r--r--  arch/x86/include/asm/sigcontext.h       2
-rw-r--r--  arch/x86/include/asm/sigcontext32.h     2
-rw-r--r--  arch/x86/include/asm/swab.h             2
-rw-r--r--  arch/x86/kernel/acpi/sleep.c            4
-rw-r--r--  arch/x86/kernel/apic.c                  2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/Kconfig    11
-rw-r--r--  arch/x86/kernel/cpu/intel.c             2
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c  15
-rw-r--r--  arch/x86/kernel/cpu/mtrr/main.c         3
-rw-r--r--  arch/x86/kernel/ds.c                   31
-rw-r--r--  arch/x86/kernel/entry_64.S              1
-rw-r--r--  arch/x86/kernel/io_apic.c               5
-rw-r--r--  arch/x86/kernel/irqinit_32.c           12
-rw-r--r--  arch/x86/mach-default/setup.c          12
-rw-r--r--  arch/x86/mach-voyager/setup.c          12
-rw-r--r--  arch/x86/mach-voyager/voyager_smp.c    24
-rw-r--r--  arch/x86/mm/fault.c                     7
-rw-r--r--  arch/x86/pci/irq.c                      1
-rw-r--r--  arch/x86/xen/multicalls.h               4
26 files changed, 103 insertions, 79 deletions
diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
index 75115849af33..4a58c8ce3f69 100644
--- a/arch/x86/boot/video-vesa.c
+++ b/arch/x86/boot/video-vesa.c
@@ -269,9 +269,8 @@ void vesa_store_edid(void)
 	   we genuinely have to assume all registers are destroyed here. */
 
 	asm("pushw %%es; movw %2,%%es; "INT10"; popw %%es"
-	    : "+a" (ax), "+b" (bx)
-	    : "c" (cx), "D" (di)
-	    : "esi");
+	    : "+a" (ax), "+b" (bx), "+c" (cx), "+D" (di)
+	    : : "esi", "edx");
 
 	if (ax != 0x004f)
 		return;		/* No EDID */
@@ -285,9 +284,9 @@ void vesa_store_edid(void)
 	dx = 0;			/* EDID block number */
 	di = (size_t) &boot_params.edid_info; /* (ES:)Pointer to block */
 	asm(INT10
-	    : "+a" (ax), "+b" (bx), "+d" (dx), "=m" (boot_params.edid_info)
-	    : "c" (cx), "D" (di)
-	    : "esi");
+	    : "+a" (ax), "+b" (bx), "+d" (dx), "=m" (boot_params.edid_info),
+	      "+c" (cx), "+D" (di)
+	    : : "esi");
 #endif /* CONFIG_FIRMWARE_EDID */
 }
 
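
The two hunks above are purely about asm constraints: every register the VBE BIOS call may clobber is promoted from a plain input ("c", "D") to a read-write operand ("+c", "+D"), and "edx" joins the clobber list, so the compiler can no longer assume those registers survive the call. A minimal user-space sketch of the same constraint pattern (fake_int10 and its values are made up for illustration, not kernel code):

#include <stdio.h>

/* A register the "firmware call" scribbles on is declared "+r" (read-write),
 * never a plain "r" input, mirroring the fixed constraints above. */
static unsigned int fake_int10(unsigned int ax, unsigned int cx)
{
	asm volatile("addl %2, %0\n\t"
		     "xorl %1, %1"		/* clobbers the cx operand */
		     : "+r" (ax), "+r" (cx)
		     : "r" (5U));
	return ax;
}

int main(void)
{
	printf("0x%x\n", fake_int10(0x4f15, 1));	/* prints 0x4f1a */
	return 0;
}

With an input-only constraint the compiler would be free to assume cx still held its old value after the asm statement, which is exactly the bug class these hunks close.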
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 9c79b2477008..097a6b64c24d 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -418,9 +418,9 @@ ENTRY(ia32_syscall)
 	orl   $TS_COMPAT,TI_status(%r10)
 	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
 	jnz ia32_tracesys
-ia32_do_syscall:
 	cmpl $(IA32_NR_syscalls-1),%eax
-	ja int_ret_from_sys_call	/* ia32_tracesys has set RAX(%rsp) */
+	ja ia32_badsys
+ia32_do_call:
 	IA32_ARG_FIXUP
 	call *ia32_sys_call_table(,%rax,8) # xxx: rip relative
 ia32_sysret:
@@ -435,7 +435,9 @@ ia32_tracesys:
 	call syscall_trace_enter
 	LOAD_ARGS32 ARGOFFSET	/* reload args from stack in case ptrace changed it */
 	RESTORE_REST
-	jmp ia32_do_syscall
+	cmpl $(IA32_NR_syscalls-1),%eax
+	ja  int_ret_from_sys_call	/* ia32_tracesys has set RAX(%rsp) */
+	jmp ia32_do_call
 END(ia32_syscall)
 
 ia32_badsys:
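
Read together, the two ia32entry.S hunks duplicate the syscall-number range check on both paths: the untraced path branches to ia32_badsys, while the traced path goes to int_ret_from_sys_call because syscall_trace_enter may already have placed a return value in RAX(%rsp). A rough user-space model of that control flow (NR_CALLS, table and the function names are illustrative stand-ins, not the real entry code):

#include <stdio.h>
#include <errno.h>

#define NR_CALLS 3
static long table[NR_CALLS] = { 10, 20, 30 };	/* stand-in syscall returns */

/* untraced fast path: an out-of-range number gets -ENOSYS (ia32_badsys) */
static long fast_path(unsigned int nr)
{
	if (nr > NR_CALLS - 1)
		return -ENOSYS;
	return table[nr];
}

/* traced path: the tracer may have rewritten nr and the return value, so an
 * out-of-range nr just keeps whatever "RAX" the tracer left behind */
static long traced_path(unsigned int nr, long traced_rax)
{
	if (nr > NR_CALLS - 1)
		return traced_rax;
	return table[nr];
}

int main(void)
{
	printf("%ld %ld %ld\n", fast_path(1), fast_path(99),
	       traced_path(99, -ENOSYS));
	return 0;
}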
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index 3d8ceddbd407..00d41ce4c844 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -49,6 +49,7 @@
 #define E820_RESERVED_KERN        128
 
 #ifndef __ASSEMBLY__
+#include <linux/types.h>
 struct e820entry {
 	__u64 addr;	/* start of memory segment */
 	__u64 size;	/* size of memory segment */
diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h
index b95162af0bf6..d2e3bf3608af 100644
--- a/arch/x86/include/asm/kvm.h
+++ b/arch/x86/include/asm/kvm.h
@@ -6,7 +6,7 @@
  *
  */
 
-#include <asm/types.h>
+#include <linux/types.h>
 #include <linux/ioctl.h>
 
 /* Architectural interrupt line count. */
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 1d6e17c2f23a..32c6e17b960b 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -3,8 +3,8 @@
 
 #ifdef __x86_64__
 
+#include <linux/types.h>
 #include <asm/ioctls.h>
-#include <asm/types.h>
 
 /*
  * Machine Check support for x86
@@ -115,8 +115,6 @@ extern int mce_notify_user(void);
 
 #endif /* !CONFIG_X86_32 */
 
-
-
 #ifdef CONFIG_X86_MCE
 extern void mcheck_init(struct cpuinfo_x86 *c);
 #else
@@ -126,5 +124,4 @@ extern void stop_mce(void);
 extern void restart_mce(void);
 
 #endif /* __KERNEL__ */
-
 #endif /* _ASM_X86_MCE_H */
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index 14080d22edb3..a51ada8467de 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -23,6 +23,7 @@
 #ifndef _ASM_X86_MTRR_H
 #define _ASM_X86_MTRR_H
 
+#include <linux/types.h>
 #include <linux/ioctl.h>
 #include <linux/errno.h>
 
diff --git a/arch/x86/include/asm/ptrace-abi.h b/arch/x86/include/asm/ptrace-abi.h
index 25f1bb8fc626..8e0f8d199e05 100644
--- a/arch/x86/include/asm/ptrace-abi.h
+++ b/arch/x86/include/asm/ptrace-abi.h
@@ -83,7 +83,7 @@
 #ifdef CONFIG_X86_PTRACE_BTS
 
 #ifndef __ASSEMBLY__
-#include <asm/types.h>
+#include <linux/types.h>
 
 /* configuration/status structure used in PTRACE_BTS_CONFIG and
    PTRACE_BTS_STATUS commands.
diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h
index 0afcb5e58acc..ec666491aaa4 100644
--- a/arch/x86/include/asm/sigcontext.h
+++ b/arch/x86/include/asm/sigcontext.h
@@ -2,7 +2,7 @@
 #define _ASM_X86_SIGCONTEXT_H
 
 #include <linux/compiler.h>
-#include <asm/types.h>
+#include <linux/types.h>
 
 #define FP_XSTATE_MAGIC1	0x46505853U
 #define FP_XSTATE_MAGIC2	0x46505845U
diff --git a/arch/x86/include/asm/sigcontext32.h b/arch/x86/include/asm/sigcontext32.h
index 6126188cf3a9..ad1478c4ae12 100644
--- a/arch/x86/include/asm/sigcontext32.h
+++ b/arch/x86/include/asm/sigcontext32.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_SIGCONTEXT32_H
 #define _ASM_X86_SIGCONTEXT32_H
 
+#include <linux/types.h>
+
 /* signal context for 32bit programs. */
 
 #define X86_FXSR_MAGIC		0x0000
diff --git a/arch/x86/include/asm/swab.h b/arch/x86/include/asm/swab.h
index 306d4178ffc9..557cd9f00661 100644
--- a/arch/x86/include/asm/swab.h
+++ b/arch/x86/include/asm/swab.h
@@ -1,7 +1,7 @@
 #ifndef _ASM_X86_SWAB_H
 #define _ASM_X86_SWAB_H
 
-#include <asm/types.h>
+#include <linux/types.h>
 #include <linux/compiler.h>
 
 static inline __attribute_const__ __u32 __arch_swab32(__u32 val)
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 4abff454c55b..7c243a2c5115 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -157,11 +157,11 @@ static int __init acpi_sleep_setup(char *str)
 #ifdef CONFIG_HIBERNATION
 		if (strncmp(str, "s4_nohwsig", 10) == 0)
 			acpi_no_s4_hw_signature();
+		if (strncmp(str, "s4_nonvs", 8) == 0)
+			acpi_s4_no_nvs();
 #endif
 		if (strncmp(str, "old_ordering", 12) == 0)
 			acpi_old_suspend_ordering();
-		if (strncmp(str, "s4_nonvs", 8) == 0)
-			acpi_s4_no_nvs();
 		str = strchr(str, ',');
 		if (str != NULL)
 			str += strspn(str, ", \t");
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c
index c6f15647eba9..383d827eef89 100644
--- a/arch/x86/kernel/apic.c
+++ b/arch/x86/kernel/apic.c
@@ -1461,7 +1461,7 @@ static int __init detect_init_APIC(void)
 	switch (boot_cpu_data.x86_vendor) {
 	case X86_VENDOR_AMD:
 		if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) ||
-		    (boot_cpu_data.x86 == 15))
+		    (boot_cpu_data.x86 >= 15))
 			break;
 		goto no_apic;
 	case X86_VENDOR_INTEL:
diff --git a/arch/x86/kernel/cpu/cpufreq/Kconfig b/arch/x86/kernel/cpu/cpufreq/Kconfig
index efae3b22a0ff..65792c2cc462 100644
--- a/arch/x86/kernel/cpu/cpufreq/Kconfig
+++ b/arch/x86/kernel/cpu/cpufreq/Kconfig
@@ -245,17 +245,6 @@ config X86_E_POWERSAVER
 
 comment "shared options"
 
-config X86_ACPI_CPUFREQ_PROC_INTF
-	bool "/proc/acpi/processor/../performance interface (deprecated)"
-	depends on PROC_FS
-	depends on X86_ACPI_CPUFREQ || X86_POWERNOW_K7_ACPI || X86_POWERNOW_K8_ACPI
-	help
-	  This enables the deprecated /proc/acpi/processor/../performance
-	  interface. While it is helpful for debugging, the generic,
-	  cross-architecture cpufreq interfaces should be used.
-
-	  If in doubt, say N.
-
 config X86_SPEEDSTEP_LIB
 	tristate
 	default (X86_SPEEDSTEP_ICH || X86_SPEEDSTEP_SMI || X86_P4_CLOCKMOD)
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 549f2ada55f5..430e5c38a544 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -30,7 +30,7 @@
 static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 {
 	/* Unmask CPUID levels if masked: */
-	if (c->x86 == 6 && c->x86_model >= 15) {
+	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
 		u64 misc_enable;
 
 		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
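
The new condition widens when CPUID levels get unmasked: any family above 6, or family-6 models from 0xd up, instead of only family-6 models >= 15 (decimal). A quick user-space comparison of the two predicates on a few sample family/model pairs (the pairs are chosen for illustration only):

#include <stdio.h>

/* old and new "unmask CPUID levels" predicates from the hunk above */
static int old_check(int family, int model)
{
	return family == 6 && model >= 15;
}

static int new_check(int family, int model)
{
	return family > 6 || (family == 6 && model >= 0xd);
}

int main(void)
{
	const int cpus[][2] = { {6, 0xd}, {6, 0xe}, {6, 0x17}, {15, 4} };

	for (unsigned int i = 0; i < sizeof(cpus) / sizeof(cpus[0]); i++)
		printf("family %d model 0x%x: old=%d new=%d\n",
		       cpus[i][0], cpus[i][1],
		       old_check(cpus[i][0], cpus[i][1]),
		       new_check(cpus[i][0], cpus[i][1]));
	return 0;
}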
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 58527a9fc404..7293508d8f5c 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -36,8 +36,11 @@ static struct _cache_table cache_table[] __cpuinitdata =
 {
 	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
 	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
+	{ 0x09, LVL_1_INST, 32 },	/* 4-way set assoc, 64 byte line size */
 	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
 	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
+	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
+	{ 0x21, LVL_2, 256 },	/* 8-way set assoc, 64 byte line size */
 	{ 0x22, LVL_3, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
 	{ 0x23, LVL_3, 1024 },	/* 8-way set assoc, sectored cache, 64 byte line size */
 	{ 0x25, LVL_3, 2048 },	/* 8-way set assoc, sectored cache, 64 byte line size */
@@ -85,6 +88,18 @@ static struct _cache_table cache_table[] __cpuinitdata =
 	{ 0x85, LVL_2, 2048 },	/* 8-way set assoc, 32 byte line size */
 	{ 0x86, LVL_2, 512 },	/* 4-way set assoc, 64 byte line size */
 	{ 0x87, LVL_2, 1024 },	/* 8-way set assoc, 64 byte line size */
+	{ 0xd0, LVL_3, 512 },	/* 4-way set assoc, 64 byte line size */
+	{ 0xd1, LVL_3, 1024 },	/* 4-way set assoc, 64 byte line size */
+	{ 0xd2, LVL_3, 2048 },	/* 4-way set assoc, 64 byte line size */
+	{ 0xd6, LVL_3, 1024 },	/* 8-way set assoc, 64 byte line size */
+	{ 0xd7, LVL_3, 2038 },	/* 8-way set assoc, 64 byte line size */
+	{ 0xd8, LVL_3, 4096 },	/* 12-way set assoc, 64 byte line size */
+	{ 0xdc, LVL_3, 2048 },	/* 12-way set assoc, 64 byte line size */
+	{ 0xdd, LVL_3, 4096 },	/* 12-way set assoc, 64 byte line size */
+	{ 0xde, LVL_3, 8192 },	/* 12-way set assoc, 64 byte line size */
+	{ 0xe2, LVL_3, 2048 },	/* 16-way set assoc, 64 byte line size */
+	{ 0xe3, LVL_3, 4096 },	/* 16-way set assoc, 64 byte line size */
+	{ 0xe4, LVL_3, 8192 },	/* 16-way set assoc, 64 byte line size */
 	{ 0x00, 0, 0}
 };
 
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index d259e5d2e054..236a401b8259 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -1594,8 +1594,7 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
 
 	/* kvm/qemu doesn't have mtrr set right, don't trim them all */
 	if (!highest_pfn) {
-		WARN(!kvm_para_available(), KERN_WARNING
-		     "WARNING: strange, CPU MTRRs all blank?\n");
+		printk(KERN_INFO "CPU MTRRs all blank - virtualized system.\n");
 		return 0;
 	}
 
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
index da91701a2348..169a120587be 100644
--- a/arch/x86/kernel/ds.c
+++ b/arch/x86/kernel/ds.c
@@ -15,8 +15,8 @@
  * - buffer allocation (memory accounting)
  *
  *
- * Copyright (C) 2007-2008 Intel Corporation.
- * Markus Metzger <markus.t.metzger@intel.com>, 2007-2008
+ * Copyright (C) 2007-2009 Intel Corporation.
+ * Markus Metzger <markus.t.metzger@intel.com>, 2007-2009
  */
 
 
@@ -890,7 +890,7 @@ int ds_set_pebs_reset(struct pebs_tracer *tracer, u64 value)
 }
 
 static const struct ds_configuration ds_cfg_netburst = {
-	.name = "netburst",
+	.name = "Netburst",
 	.ctl[dsf_bts]		= (1 << 2) | (1 << 3),
 	.ctl[dsf_bts_kernel]	= (1 << 5),
 	.ctl[dsf_bts_user]	= (1 << 6),
@@ -904,7 +904,7 @@ static const struct ds_configuration ds_cfg_netburst = {
 #endif
 };
 static const struct ds_configuration ds_cfg_pentium_m = {
-	.name = "pentium m",
+	.name = "Pentium M",
 	.ctl[dsf_bts]		= (1 << 6) | (1 << 7),
 
 	.sizeof_field		= sizeof(long),
@@ -915,8 +915,8 @@ static const struct ds_configuration ds_cfg_pentium_m = {
 	.sizeof_rec[ds_pebs]	= sizeof(long) * 18,
 #endif
 };
-static const struct ds_configuration ds_cfg_core2 = {
-	.name = "core 2",
+static const struct ds_configuration ds_cfg_core2_atom = {
+	.name = "Core 2/Atom",
 	.ctl[dsf_bts]		= (1 << 6) | (1 << 7),
 	.ctl[dsf_bts_kernel]	= (1 << 9),
 	.ctl[dsf_bts_user]	= (1 << 10),
@@ -949,19 +949,22 @@ void __cpuinit ds_init_intel(struct cpuinfo_x86 *c)
 	switch (c->x86) {
 	case 0x6:
 		switch (c->x86_model) {
-		case 0 ... 0xC:
-			/* sorry, don't know about them */
-			break;
-		case 0xD:
-		case 0xE: /* Pentium M */
+		case 0x9:
+		case 0xd: /* Pentium M */
 			ds_configure(&ds_cfg_pentium_m);
 			break;
-		default: /* Core2, Atom, ... */
-			ds_configure(&ds_cfg_core2);
+		case 0xf:
+		case 0x17: /* Core2 */
+		case 0x1c: /* Atom */
+			ds_configure(&ds_cfg_core2_atom);
+			break;
+		case 0x1a: /* i7 */
+		default:
+			/* sorry, don't know about them */
 			break;
 		}
 		break;
-	case 0xF:
+	case 0xf:
 		switch (c->x86_model) {
 		case 0x0:
 		case 0x1:
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index e4c9710cae52..586bed677557 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -347,6 +347,7 @@ ENTRY(save_args)
 	popq_cfi %rax			/* move return address... */
 	mov PER_CPU_VAR(irq_stack_ptr),%rsp
 	EMPTY_FRAME 0
+	pushq_cfi %rbp			/* backlink for unwinder */
 	pushq_cfi %rax			/* ... to the new stack */
 	/*
 	 * We entered an interrupt context - irqs are off:
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index e4d36bd56b62..0a7f6d6b1206 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -2527,14 +2527,15 @@ static void irq_complete_move(struct irq_desc **descp)
 
 	vector = ~get_irq_regs()->orig_ax;
 	me = smp_processor_id();
+
+	if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) {
 #ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
 		*descp = desc = move_irq_desc(desc, me);
 		/* get the new one */
 		cfg = desc->chip_data;
 #endif
-
-	if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
 		send_cleanup_vector(cfg);
+	}
 }
 #else
 static inline void irq_complete_move(struct irq_desc **descp) {}
diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c
index bf629cadec1a..22608ebf831b 100644
--- a/arch/x86/kernel/irqinit_32.c
+++ b/arch/x86/kernel/irqinit_32.c
@@ -78,15 +78,6 @@ void __init init_ISA_irqs(void)
 	}
 }
 
-/*
- * IRQ2 is cascade interrupt to second interrupt controller
- */
-static struct irqaction irq2 = {
-	.handler = no_action,
-	.mask = CPU_MASK_NONE,
-	.name = "cascade",
-};
-
 DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
 	[0 ... IRQ0_VECTOR - 1] = -1,
 	[IRQ0_VECTOR] = 0,
@@ -185,9 +176,6 @@ void __init native_init_IRQ(void)
 	alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
 #endif
 
-	if (!acpi_ioapic)
-		setup_irq(2, &irq2);
-
 	/* setup after call gates are initialised (usually add in
 	 * the architecture specific gates)
 	 */
diff --git a/arch/x86/mach-default/setup.c b/arch/x86/mach-default/setup.c
index df167f265622..a265a7c63190 100644
--- a/arch/x86/mach-default/setup.c
+++ b/arch/x86/mach-default/setup.c
@@ -38,6 +38,15 @@ void __init pre_intr_init_hook(void)
 	init_ISA_irqs();
 }
 
+/*
+ * IRQ2 is cascade interrupt to second interrupt controller
+ */
+static struct irqaction irq2 = {
+	.handler = no_action,
+	.mask = CPU_MASK_NONE,
+	.name = "cascade",
+};
+
 /**
  * intr_init_hook - post gate setup interrupt initialisation
  *
@@ -53,6 +62,9 @@ void __init intr_init_hook(void)
 		if (x86_quirks->arch_intr_init())
 			return;
 	}
+	if (!acpi_ioapic)
+		setup_irq(2, &irq2);
+
 }
 
 /**
diff --git a/arch/x86/mach-voyager/setup.c b/arch/x86/mach-voyager/setup.c
index 0ade62555ff3..66b7eb57d8e4 100644
--- a/arch/x86/mach-voyager/setup.c
+++ b/arch/x86/mach-voyager/setup.c
@@ -34,13 +34,23 @@ void __init intr_init_hook(void)
 	setup_irq(2, &irq2);
 }
 
-void __init pre_setup_arch_hook(void)
+static void voyager_disable_tsc(void)
 {
 	/* Voyagers run their CPUs from independent clocks, so disable
 	 * the TSC code because we can't sync them */
 	setup_clear_cpu_cap(X86_FEATURE_TSC);
 }
 
+void __init pre_setup_arch_hook(void)
+{
+	voyager_disable_tsc();
+}
+
+void __init pre_time_init_hook(void)
+{
+	voyager_disable_tsc();
+}
+
 void __init trap_init_hook(void)
 {
 }
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 58c7cac3440d..6f5a38c7f900 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -81,7 +81,7 @@ static void enable_local_vic_irq(unsigned int irq);
 static void disable_local_vic_irq(unsigned int irq);
 static void before_handle_vic_irq(unsigned int irq);
 static void after_handle_vic_irq(unsigned int irq);
-static void set_vic_irq_affinity(unsigned int irq, cpumask_t mask);
+static void set_vic_irq_affinity(unsigned int irq, const struct cpumask *mask);
 static void ack_vic_irq(unsigned int irq);
 static void vic_enable_cpi(void);
 static void do_boot_cpu(__u8 cpuid);
@@ -211,8 +211,6 @@ static __u32 cpu_booted_map;
 static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
 
 /* This is for the new dynamic CPU boot code */
-cpumask_t cpu_callin_map = CPU_MASK_NONE;
-cpumask_t cpu_callout_map = CPU_MASK_NONE;
 
 /* The per processor IRQ masks (these are usually kept in sync) */
 static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;
@@ -378,7 +376,7 @@ void __init find_smp_config(void)
 	cpus_addr(phys_cpu_present_map)[0] |=
 	    voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK +
 				       3) << 24;
-	cpu_possible_map = phys_cpu_present_map;
+	init_cpu_possible(&phys_cpu_present_map);
 	printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n",
 	       cpus_addr(phys_cpu_present_map)[0]);
 	/* Here we set up the VIC to enable SMP */
@@ -1598,16 +1596,16 @@ static void after_handle_vic_irq(unsigned int irq)
  * change the mask and then do an interrupt enable CPI to re-enable on
  * the selected processors */
 
-void set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
+void set_vic_irq_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	/* Only extended processors handle interrupts */
 	unsigned long real_mask;
 	unsigned long irq_mask = 1 << irq;
 	int cpu;
 
-	real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors;
+	real_mask = cpus_addr(*mask)[0] & voyager_extended_vic_processors;
 
-	if (cpus_addr(mask)[0] == 0)
+	if (cpus_addr(*mask)[0] == 0)
 		/* can't have no CPUs to accept the interrupt -- extremely
 		 * bad things will happen */
 		return;
@@ -1749,10 +1747,10 @@ static void __cpuinit voyager_smp_prepare_boot_cpu(void)
 	int cpu = smp_processor_id();
 	switch_to_new_gdt(cpu);
 
-	cpu_set(cpu, cpu_online_map);
-	cpu_set(cpu, cpu_callout_map);
-	cpu_set(cpu, cpu_possible_map);
-	cpu_set(cpu, cpu_present_map);
+	cpu_online_map = cpumask_of_cpu(smp_processor_id());
+	cpu_callout_map = cpumask_of_cpu(smp_processor_id());
+	cpu_callin_map = CPU_MASK_NONE;
+	cpu_present_map = cpumask_of_cpu(smp_processor_id());
 }
 
 static int __cpuinit voyager_cpu_up(unsigned int cpu)
@@ -1781,9 +1779,9 @@ void __init smp_setup_processor_id(void)
 	current_thread_info()->cpu = hard_smp_processor_id();
 }
 
-static void voyager_send_call_func(cpumask_t callmask)
+static void voyager_send_call_func(const struct cpumask *callmask)
 {
-	__u32 mask = cpus_addr(callmask)[0] & ~(1 << smp_processor_id());
+	__u32 mask = cpus_addr(*callmask)[0] & ~(1 << smp_processor_id());
 	send_CPI(mask, VIC_CALL_FUNCTION_CPI);
 }
 
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 65709a6aa6ee..8c3f3113a6ec 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -807,8 +807,6 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	/* get the address */
 	address = read_cr2();
 
-	if (unlikely(notify_page_fault(regs)))
-		return;
 	if (unlikely(kmmio_fault(regs, address)))
 		return;
 
@@ -838,6 +836,9 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 		if (spurious_fault(error_code, address))
 			return;
 
+		/* kprobes don't want to hook the spurious faults. */
+		if (notify_page_fault(regs))
+			return;
 		/*
 		 * Don't take the mm semaphore here. If we fixup a prefetch
 		 * fault we could otherwise deadlock.
@@ -846,6 +847,8 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 		return;
 	}
 
+	if (unlikely(notify_page_fault(regs)))
+		return;
 	/*
 	 * It's safe to allow irq's after cr2 has been saved and the
 	 * vmalloc fault has been handled.
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index 4064345cf144..fecbce6e7d7c 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -572,6 +572,7 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route
 	case PCI_DEVICE_ID_INTEL_ICH7_1:
 	case PCI_DEVICE_ID_INTEL_ICH7_30:
 	case PCI_DEVICE_ID_INTEL_ICH7_31:
+	case PCI_DEVICE_ID_INTEL_TGP_LPC:
 	case PCI_DEVICE_ID_INTEL_ESB2_0:
 	case PCI_DEVICE_ID_INTEL_ICH8_0:
 	case PCI_DEVICE_ID_INTEL_ICH8_1:
diff --git a/arch/x86/xen/multicalls.h b/arch/x86/xen/multicalls.h
index e786fa7f2615..9e565da5d1f7 100644
--- a/arch/x86/xen/multicalls.h
+++ b/arch/x86/xen/multicalls.h
@@ -19,8 +19,10 @@ DECLARE_PER_CPU(unsigned long, xen_mc_irq_flags);
    paired with xen_mc_issue() */
 static inline void xen_mc_batch(void)
 {
+	unsigned long flags;
 	/* need to disable interrupts until this entry is complete */
-	local_irq_save(__get_cpu_var(xen_mc_irq_flags));
+	local_irq_save(flags);
+	__get_cpu_var(xen_mc_irq_flags) = flags;
 }
 
 static inline struct multicall_space xen_mc_entry(size_t args)
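
The xen_mc_batch() hunk keeps local_irq_save(), which writes to its argument, pointed at a plain local and only then copies the result into the per-CPU slot, rather than handing it the __get_cpu_var() lvalue directly; presumably this keeps the macro's argument a simple variable while the per-CPU machinery is being reworked on this branch. A stand-alone sketch of the pattern (SAVE_FLAGS and this_cpu_slot are stand-ins, not kernel symbols):

#include <stdio.h>

/* stand-in for local_irq_save(): a macro that assigns to its argument */
#define SAVE_FLAGS(x) do { (x) = 0x200UL; } while (0)

static unsigned long this_cpu_slot;	/* stand-in for __get_cpu_var(...) */

static void mc_batch(void)
{
	unsigned long flags;

	SAVE_FLAGS(flags);		/* macro writes a simple local ... */
	this_cpu_slot = flags;		/* ... which is then copied out */
}

int main(void)
{
	mc_batch();
	printf("saved flags: %#lx\n", this_cpu_slot);
	return 0;
}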