author		Jens Axboe <jens.axboe@oracle.com>	2010-03-19 03:05:10 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2010-03-19 03:05:10 -0400
commit		b4b7a4ef097f288f724420b473dbf92a89c0ab7e (patch)
tree		23ad8101e3e77c32a8d1e1b95a9c1cd7f7a475b7 /arch/x86
parent		e9ce335df51ff782035a15c261a3c0c9892a1767 (diff)
parent		a3d3203e4bb40f253b1541e310dc0f9305be7c84 (diff)
Merge branch 'master' into for-linus
Conflicts:
	block/Kconfig

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig | 7
-rw-r--r--  arch/x86/crypto/twofish-i586-asm_32.S | 10
-rw-r--r--  arch/x86/crypto/twofish-x86_64-asm_64.S | 20
-rw-r--r--  arch/x86/ia32/ia32entry.S | 6
-rw-r--r--  arch/x86/ia32/sys_ia32.c | 76
-rw-r--r--  arch/x86/include/asm/compat.h | 3
-rw-r--r--  arch/x86/include/asm/hw_breakpoint.h | 1
-rw-r--r--  arch/x86/include/asm/pci.h | 28
-rw-r--r--  arch/x86/include/asm/perf_event.h | 16
-rw-r--r--  arch/x86/include/asm/ptrace.h | 7
-rw-r--r--  arch/x86/include/asm/sys_ia32.h | 11
-rw-r--r--  arch/x86/include/asm/syscalls.h | 15
-rw-r--r--  arch/x86/include/asm/unistd_32.h | 4
-rw-r--r--  arch/x86/include/asm/unistd_64.h | 3
-rw-r--r--  arch/x86/kernel/acpi/boot.c | 100
-rw-r--r--  arch/x86/kernel/aperture_64.c | 1
-rw-r--r--  arch/x86/kernel/apic/apic_flat_64.c | 2
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c | 4
-rw-r--r--  arch/x86/kernel/cpu/intel.c | 3
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c | 2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c | 16
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd.c | 2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_intel.c | 4
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c | 39
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c | 37
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p6.c | 8
-rw-r--r--  arch/x86/kernel/cpu/perfctr-watchdog.c | 2
-rw-r--r--  arch/x86/kernel/dumpstack_64.c | 10
-rw-r--r--  arch/x86/kernel/head_64.S | 2
-rw-r--r--  arch/x86/kernel/hw_breakpoint.c | 12
-rw-r--r--  arch/x86/kernel/k8.c | 14
-rw-r--r--  arch/x86/kernel/pci-calgary_64.c | 2
-rw-r--r--  arch/x86/kernel/pci-dma.c | 2
-rw-r--r--  arch/x86/kernel/pci-gart_64.c | 2
-rw-r--r--  arch/x86/kernel/process.c | 2
-rw-r--r--  arch/x86/kernel/ptrace.c | 2
-rw-r--r--  arch/x86/kernel/sys_i386_32.c | 185
-rw-r--r--  arch/x86/kernel/sys_x86_64.c | 12
-rw-r--r--  arch/x86/kernel/syscall_table_32.S | 4
-rw-r--r--  arch/x86/kernel/tsc.c | 2
-rw-r--r--  arch/x86/kernel/vmiclock_32.c | 2
-rw-r--r--  arch/x86/mm/pageattr.c | 25
-rw-r--r--  arch/x86/oprofile/op_model_amd.c | 23
-rw-r--r--  arch/x86/oprofile/op_model_ppro.c | 6
-rw-r--r--  arch/x86/xen/smp.c | 2
45 files changed, 194 insertions(+), 542 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e98440371525..0eacb1ffb421 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -102,6 +102,9 @@ config ZONE_DMA
 config SBUS
 	bool
 
+config NEED_DMA_MAP_STATE
+	def_bool (X86_64 || DMAR || DMA_API_DEBUG)
+
 config GENERIC_ISA_DMA
 	def_bool y
 
@@ -659,7 +662,7 @@ config GART_IOMMU
 	bool "GART IOMMU support" if EMBEDDED
 	default y
 	select SWIOTLB
-	depends on X86_64 && PCI
+	depends on X86_64 && PCI && K8_NB
 	---help---
 	  Support for full DMA access of devices with 32bit memory access only
 	  on systems with more than 3GB. This is usually needed for USB,
@@ -2058,7 +2061,7 @@ endif # X86_32
 
 config K8_NB
 	def_bool y
-	depends on AGP_AMD64 || (X86_64 && (GART_IOMMU || (PCI && NUMA)))
+	depends on CPU_SUP_AMD && PCI
 
 source "drivers/pcmcia/Kconfig"
 
diff --git a/arch/x86/crypto/twofish-i586-asm_32.S b/arch/x86/crypto/twofish-i586-asm_32.S
index 39b98ed2c1b9..575331cb2a8a 100644
--- a/arch/x86/crypto/twofish-i586-asm_32.S
+++ b/arch/x86/crypto/twofish-i586-asm_32.S
@@ -22,7 +22,7 @@
 
 #include <asm/asm-offsets.h>
 
-/* return adress at 0 */
+/* return address at 0 */
 
 #define in_blk 12  /* input byte array address parameter*/
 #define out_blk 8  /* output byte array address parameter*/
@@ -230,8 +230,8 @@ twofish_enc_blk:
 	push	%edi
 
 	mov	tfm + 16(%esp), %ebp	/* abuse the base pointer: set new base bointer to the crypto tfm */
-	add	$crypto_tfm_ctx_offset, %ebp	/* ctx adress */
-	mov	in_blk+16(%esp),%edi	/* input adress in edi */
+	add	$crypto_tfm_ctx_offset, %ebp	/* ctx address */
+	mov	in_blk+16(%esp),%edi	/* input address in edi */
 
 	mov	(%edi),		%eax
 	mov	b_offset(%edi),	%ebx
@@ -286,8 +286,8 @@ twofish_dec_blk:
 
 
 	mov	tfm + 16(%esp), %ebp	/* abuse the base pointer: set new base bointer to the crypto tfm */
-	add	$crypto_tfm_ctx_offset, %ebp	/* ctx adress */
-	mov	in_blk+16(%esp),%edi	/* input adress in edi */
+	add	$crypto_tfm_ctx_offset, %ebp	/* ctx address */
+	mov	in_blk+16(%esp),%edi	/* input address in edi */
 
 	mov	(%edi),		%eax
 	mov	b_offset(%edi),	%ebx
diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
index 35974a586615..573aa102542e 100644
--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
@@ -221,11 +221,11 @@
 twofish_enc_blk:
 	pushq    R1
 
-	/* %rdi contains the crypto tfm adress */
-	/* %rsi contains the output adress */
-	/* %rdx contains the input adress */
-	add	$crypto_tfm_ctx_offset, %rdi	/* set ctx adress */
-	/* ctx adress is moved to free one non-rex register
+	/* %rdi contains the crypto tfm address */
+	/* %rsi contains the output address */
+	/* %rdx contains the input address */
+	add	$crypto_tfm_ctx_offset, %rdi	/* set ctx address */
+	/* ctx address is moved to free one non-rex register
 	   as target for the 8bit high operations */
 	mov	%rdi, %r11
 
@@ -274,11 +274,11 @@ twofish_enc_blk:
 twofish_dec_blk:
 	pushq    R1
 
-	/* %rdi contains the crypto tfm adress */
-	/* %rsi contains the output adress */
-	/* %rdx contains the input adress */
-	add	$crypto_tfm_ctx_offset, %rdi	/* set ctx adress */
-	/* ctx adress is moved to free one non-rex register
+	/* %rdi contains the crypto tfm address */
+	/* %rsi contains the output address */
+	/* %rdx contains the input address */
+	add	$crypto_tfm_ctx_offset, %rdi	/* set ctx address */
+	/* ctx address is moved to free one non-rex register
 	   as target for the 8bit high operations */
 	mov	%rdi, %r11
 
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 53147ad85b96..59b4556a5b92 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -563,7 +563,7 @@ ia32_sys_call_table:
 	.quad quiet_ni_syscall		/* old mpx syscall holder */
 	.quad sys_setpgid
 	.quad quiet_ni_syscall		/* old ulimit syscall holder */
-	.quad sys32_olduname
+	.quad sys_olduname
 	.quad sys_umask			/* 60 */
 	.quad sys_chroot
 	.quad compat_sys_ustat
@@ -586,7 +586,7 @@ ia32_sys_call_table:
 	.quad compat_sys_settimeofday
 	.quad sys_getgroups16		/* 80 */
 	.quad sys_setgroups16
-	.quad sys32_old_select
+	.quad compat_sys_old_select
 	.quad sys_symlink
 	.quad sys_lstat
 	.quad sys_readlink		/* 85 */
@@ -613,7 +613,7 @@ ia32_sys_call_table:
 	.quad compat_sys_newstat
 	.quad compat_sys_newlstat
 	.quad compat_sys_newfstat
-	.quad sys32_uname
+	.quad sys_uname
 	.quad stub32_iopl		/* 110 */
 	.quad sys_vhangup
 	.quad quiet_ni_syscall	/* old "idle" system call */
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index 422572c77923..74c35431b7d8 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -143,7 +143,7 @@ asmlinkage long sys32_fstatat(unsigned int dfd, char __user *filename,
  * block for parameter passing..
  */
 
-struct mmap_arg_struct {
+struct mmap_arg_struct32 {
 	unsigned int addr;
 	unsigned int len;
 	unsigned int prot;
@@ -152,9 +152,9 @@ struct mmap_arg_struct {
 	unsigned int offset;
 };
 
-asmlinkage long sys32_mmap(struct mmap_arg_struct __user *arg)
+asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *arg)
 {
-	struct mmap_arg_struct a;
+	struct mmap_arg_struct32 a;
 
 	if (copy_from_user(&a, arg, sizeof(a)))
 		return -EFAULT;
@@ -332,24 +332,6 @@ asmlinkage long sys32_alarm(unsigned int seconds)
 	return alarm_setitimer(seconds);
 }
 
-struct sel_arg_struct {
-	unsigned int n;
-	unsigned int inp;
-	unsigned int outp;
-	unsigned int exp;
-	unsigned int tvp;
-};
-
-asmlinkage long sys32_old_select(struct sel_arg_struct __user *arg)
-{
-	struct sel_arg_struct a;
-
-	if (copy_from_user(&a, arg, sizeof(a)))
-		return -EFAULT;
-	return compat_sys_select(a.n, compat_ptr(a.inp), compat_ptr(a.outp),
-				 compat_ptr(a.exp), compat_ptr(a.tvp));
-}
-
 asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
 			      int options)
 {
@@ -466,58 +448,6 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
 	return ret;
 }
 
-asmlinkage long sys32_olduname(struct oldold_utsname __user *name)
-{
-	char *arch = "x86_64";
-	int err;
-
-	if (!name)
-		return -EFAULT;
-	if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
-		return -EFAULT;
-
-	down_read(&uts_sem);
-
-	err = __copy_to_user(&name->sysname, &utsname()->sysname,
-			     __OLD_UTS_LEN);
-	err |= __put_user(0, name->sysname+__OLD_UTS_LEN);
-	err |= __copy_to_user(&name->nodename, &utsname()->nodename,
-			      __OLD_UTS_LEN);
-	err |= __put_user(0, name->nodename+__OLD_UTS_LEN);
-	err |= __copy_to_user(&name->release, &utsname()->release,
-			      __OLD_UTS_LEN);
-	err |= __put_user(0, name->release+__OLD_UTS_LEN);
-	err |= __copy_to_user(&name->version, &utsname()->version,
-			      __OLD_UTS_LEN);
-	err |= __put_user(0, name->version+__OLD_UTS_LEN);
-
-	if (personality(current->personality) == PER_LINUX32)
-		arch = "i686";
-
-	err |= __copy_to_user(&name->machine, arch, strlen(arch) + 1);
-
-	up_read(&uts_sem);
-
-	err = err ? -EFAULT : 0;
-
-	return err;
-}
-
-long sys32_uname(struct old_utsname __user *name)
-{
-	int err;
-
-	if (!name)
-		return -EFAULT;
-	down_read(&uts_sem);
-	err = copy_to_user(name, utsname(), sizeof(*name));
-	up_read(&uts_sem);
-	if (personality(current->personality) == PER_LINUX32)
-		err |= copy_to_user(&name->machine, "i686", 5);
-
-	return err ? -EFAULT : 0;
-}
-
 asmlinkage long sys32_execve(char __user *name, compat_uptr_t __user *argv,
 			     compat_uptr_t __user *envp, struct pt_regs *regs)
 {
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index 9a9c7bdc923d..306160e58b48 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -8,7 +8,8 @@
 #include <linux/sched.h>
 #include <asm/user32.h>
 
 #define COMPAT_USER_HZ		100
+#define COMPAT_UTS_MACHINE	"i686\0\0"
 
 typedef u32		compat_size_t;
 typedef s32		compat_ssize_t;
diff --git a/arch/x86/include/asm/hw_breakpoint.h b/arch/x86/include/asm/hw_breakpoint.h
index 0675a7c4c20e..2a1bd8f4f23a 100644
--- a/arch/x86/include/asm/hw_breakpoint.h
+++ b/arch/x86/include/asm/hw_breakpoint.h
@@ -10,7 +10,6 @@
  * (display/resolving)
  */
 struct arch_hw_breakpoint {
-	char		*name; /* Contains name of the symbol to set bkpt */
 	unsigned long	address;
 	u8		len;
 	u8		type;
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index 3e002ca5a287..404a880ea325 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -97,34 +97,6 @@ extern void pci_iommu_alloc(void);
 
 #define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys)
 
-#if defined(CONFIG_X86_64) || defined(CONFIG_DMAR) || defined(CONFIG_DMA_API_DEBUG)
-
-#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
-	dma_addr_t ADDR_NAME;
-#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		\
-	__u32 LEN_NAME;
-#define pci_unmap_addr(PTR, ADDR_NAME)		\
-	((PTR)->ADDR_NAME)
-#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	\
-	(((PTR)->ADDR_NAME) = (VAL))
-#define pci_unmap_len(PTR, LEN_NAME)		\
-	((PTR)->LEN_NAME)
-#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	\
-	(((PTR)->LEN_NAME) = (VAL))
-
-#else
-
-#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME[0];
-#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)	unsigned LEN_NAME[0];
-#define pci_unmap_addr(PTR, ADDR_NAME)	sizeof((PTR)->ADDR_NAME)
-#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	\
-	do { break; } while (pci_unmap_addr(PTR, ADDR_NAME))
-#define pci_unmap_len(PTR, LEN_NAME)	sizeof((PTR)->LEN_NAME)
-#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	\
-	do { break; } while (pci_unmap_len(PTR, LEN_NAME))
-
-#endif
-
 #endif  /* __KERNEL__ */
 
 #ifdef CONFIG_X86_64
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index befd172c82ad..db6109a885a7 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -18,7 +18,7 @@
 #define MSR_ARCH_PERFMON_EVENTSEL0			     0x186
 #define MSR_ARCH_PERFMON_EVENTSEL1			     0x187
 
-#define ARCH_PERFMON_EVENTSEL0_ENABLE			  (1 << 22)
+#define ARCH_PERFMON_EVENTSEL_ENABLE			  (1 << 22)
 #define ARCH_PERFMON_EVENTSEL_ANY			  (1 << 21)
 #define ARCH_PERFMON_EVENTSEL_INT			  (1 << 20)
 #define ARCH_PERFMON_EVENTSEL_OS			  (1 << 17)
@@ -50,7 +50,7 @@
 	 INTEL_ARCH_INV_MASK| \
 	 INTEL_ARCH_EDGE_MASK|\
 	 INTEL_ARCH_UNIT_MASK|\
-	 INTEL_ARCH_EVTSEL_MASK)
+	 INTEL_ARCH_EVENT_MASK)
 
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		      0x3c
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
@@ -117,6 +117,18 @@ union cpuid10_edx {
  */
 #define X86_PMC_IDX_FIXED_BTS				(X86_PMC_IDX_FIXED + 16)
 
+/* IbsFetchCtl bits/masks */
+#define IBS_FETCH_RAND_EN	(1ULL<<57)
+#define IBS_FETCH_VAL		(1ULL<<49)
+#define IBS_FETCH_ENABLE	(1ULL<<48)
+#define IBS_FETCH_CNT		0xFFFF0000ULL
+#define IBS_FETCH_MAX_CNT	0x0000FFFFULL
+
+/* IbsOpCtl bits */
+#define IBS_OP_CNT_CTL		(1ULL<<19)
+#define IBS_OP_VAL		(1ULL<<18)
+#define IBS_OP_ENABLE		(1ULL<<17)
+#define IBS_OP_MAX_CNT		0x0000FFFFULL
 
 #ifdef CONFIG_PERF_EVENTS
 extern void init_hw_perf_events(void);
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 20102808b191..69a686a7dff0 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -274,14 +274,7 @@ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
 	return 0;
 }
 
-/*
- * These are defined as per linux/ptrace.h, which see.
- */
 #define arch_has_single_step()	(1)
-extern void user_enable_single_step(struct task_struct *);
-extern void user_disable_single_step(struct task_struct *);
-
-extern void user_enable_block_step(struct task_struct *);
 #ifdef CONFIG_X86_DEBUGCTLMSR
 #define arch_has_block_step()	(1)
 #else
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
index d5f69045c100..3ad421784ae7 100644
--- a/arch/x86/include/asm/sys_ia32.h
+++ b/arch/x86/include/asm/sys_ia32.h
@@ -26,8 +26,8 @@ asmlinkage long sys32_lstat64(char __user *, struct stat64 __user *);
 asmlinkage long sys32_fstat64(unsigned int, struct stat64 __user *);
 asmlinkage long sys32_fstatat(unsigned int, char __user *,
 			      struct stat64 __user *, int);
-struct mmap_arg_struct;
-asmlinkage long sys32_mmap(struct mmap_arg_struct __user *);
+struct mmap_arg_struct32;
+asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *);
 asmlinkage long sys32_mprotect(unsigned long, size_t, unsigned long);
 
 struct sigaction32;
@@ -40,8 +40,6 @@ asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *,
 				     compat_sigset_t __user *, unsigned int);
 asmlinkage long sys32_alarm(unsigned int);
 
-struct sel_arg_struct;
-asmlinkage long sys32_old_select(struct sel_arg_struct __user *);
 asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
 asmlinkage long sys32_sysfs(int, u32, u32);
 
@@ -56,11 +54,6 @@ asmlinkage long sys32_pwrite(unsigned int, char __user *, u32, u32, u32);
 asmlinkage long sys32_personality(unsigned long);
 asmlinkage long sys32_sendfile(int, int, compat_off_t __user *, s32);
 
-struct oldold_utsname;
-struct old_utsname;
-asmlinkage long sys32_olduname(struct oldold_utsname __user *);
-long sys32_uname(struct old_utsname __user *);
-
 asmlinkage long sys32_execve(char __user *, compat_uptr_t __user *,
 			     compat_uptr_t __user *, struct pt_regs *);
 asmlinkage long sys32_clone(unsigned int, unsigned int, struct pt_regs *);
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
index 8868b9420b0e..5c044b43e9a7 100644
--- a/arch/x86/include/asm/syscalls.h
+++ b/arch/x86/include/asm/syscalls.h
@@ -50,18 +50,6 @@ asmlinkage int sys_sigaction(int, const struct old_sigaction __user *,
 			    struct old_sigaction __user *);
 unsigned long sys_sigreturn(struct pt_regs *);
 
-/* kernel/sys_i386_32.c */
-struct mmap_arg_struct;
-struct sel_arg_struct;
-struct oldold_utsname;
-struct old_utsname;
-
-asmlinkage int old_mmap(struct mmap_arg_struct __user *);
-asmlinkage int old_select(struct sel_arg_struct __user *);
-asmlinkage int sys_ipc(uint, int, int, int, void __user *, long);
-asmlinkage int sys_uname(struct old_utsname __user *);
-asmlinkage int sys_olduname(struct oldold_utsname __user *);
-
 /* kernel/vm86_32.c */
 int sys_vm86old(struct vm86_struct __user *, struct pt_regs *);
 int sys_vm86(unsigned long, unsigned long, struct pt_regs *);
@@ -73,11 +61,8 @@ int sys_vm86(unsigned long, unsigned long, struct pt_regs *);
 long sys_arch_prctl(int, unsigned long);
 
 /* kernel/sys_x86_64.c */
-struct new_utsname;
-
 asmlinkage long sys_mmap(unsigned long, unsigned long, unsigned long,
 			 unsigned long, unsigned long, unsigned long);
-asmlinkage long sys_uname(struct new_utsname __user *);
 
 #endif /* CONFIG_X86_32 */
 #endif /* _ASM_X86_SYSCALLS_H */
diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h
index 3baf379fa840..beb9b5f8f8a4 100644
--- a/arch/x86/include/asm/unistd_32.h
+++ b/arch/x86/include/asm/unistd_32.h
@@ -354,6 +354,7 @@
 #define __ARCH_WANT_STAT64
 #define __ARCH_WANT_SYS_ALARM
 #define __ARCH_WANT_SYS_GETHOSTNAME
+#define __ARCH_WANT_SYS_IPC
 #define __ARCH_WANT_SYS_PAUSE
 #define __ARCH_WANT_SYS_SGETMASK
 #define __ARCH_WANT_SYS_SIGNAL
@@ -366,6 +367,9 @@
 #define __ARCH_WANT_SYS_LLSEEK
 #define __ARCH_WANT_SYS_NICE
 #define __ARCH_WANT_SYS_OLD_GETRLIMIT
+#define __ARCH_WANT_SYS_OLD_UNAME
+#define __ARCH_WANT_SYS_OLD_MMAP
+#define __ARCH_WANT_SYS_OLD_SELECT
 #define __ARCH_WANT_SYS_OLDUMOUNT
 #define __ARCH_WANT_SYS_SIGPENDING
 #define __ARCH_WANT_SYS_SIGPROCMASK
diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h
index 4843f7ba754a..ff4307b0e81e 100644
--- a/arch/x86/include/asm/unistd_64.h
+++ b/arch/x86/include/asm/unistd_64.h
@@ -146,7 +146,7 @@ __SYSCALL(__NR_wait4, sys_wait4)
 #define __NR_kill				62
 __SYSCALL(__NR_kill, sys_kill)
 #define __NR_uname				63
-__SYSCALL(__NR_uname, sys_uname)
+__SYSCALL(__NR_uname, sys_newuname)
 
 #define __NR_semget				64
 __SYSCALL(__NR_semget, sys_semget)
@@ -680,6 +680,7 @@ __SYSCALL(__NR_recvmmsg, sys_recvmmsg)
 #define __ARCH_WANT_SYS_LLSEEK
 #define __ARCH_WANT_SYS_NICE
 #define __ARCH_WANT_SYS_OLD_GETRLIMIT
+#define __ARCH_WANT_SYS_OLD_UNAME
 #define __ARCH_WANT_SYS_OLDUMOUNT
 #define __ARCH_WANT_SYS_SIGPENDING
 #define __ARCH_WANT_SYS_SIGPROCMASK
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index a54d714545ff..0061ea263061 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -490,6 +490,7 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
  * ACPI based hotplug support for CPU
  */
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
+#include <acpi/processor.h>
 
 static void acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 {
@@ -567,6 +568,8 @@ static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
 		goto free_new_map;
 	}
 
+	acpi_processor_set_pdc(handle);
+
 	cpu = cpumask_first(new_map);
 	acpi_map_cpu2node(handle, cpu, physid);
 
@@ -1293,23 +1296,6 @@ static int __init dmi_disable_acpi(const struct dmi_system_id *d)
 }
 
 /*
- * Limit ACPI to CPU enumeration for HT
- */
-static int __init force_acpi_ht(const struct dmi_system_id *d)
-{
-	if (!acpi_force) {
-		printk(KERN_NOTICE "%s detected: force use of acpi=ht\n",
-		       d->ident);
-		disable_acpi();
-		acpi_ht = 1;
-	} else {
-		printk(KERN_NOTICE
-		       "Warning: acpi=force overrules DMI blacklist: acpi=ht\n");
-	}
-	return 0;
-}
-
-/*
  * Force ignoring BIOS IRQ0 pin2 override
  */
 static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
@@ -1345,82 +1331,6 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
 	},
 
 	/*
-	 * Boxes that need acpi=ht
-	 */
-	{
-	 .callback = force_acpi_ht,
-	 .ident = "FSC Primergy T850",
-	 .matches = {
-		     DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
-		     DMI_MATCH(DMI_PRODUCT_NAME, "PRIMERGY T850"),
-		     },
-	 },
-	{
-	 .callback = force_acpi_ht,
-	 .ident = "HP VISUALIZE NT Workstation",
-	 .matches = {
-		     DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
-		     DMI_MATCH(DMI_PRODUCT_NAME, "HP VISUALIZE NT Workstation"),
-		     },
-	 },
-	{
-	 .callback = force_acpi_ht,
-	 .ident = "Compaq Workstation W8000",
-	 .matches = {
-		     DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
-		     DMI_MATCH(DMI_PRODUCT_NAME, "Workstation W8000"),
-		     },
-	 },
-	{
-	 .callback = force_acpi_ht,
-	 .ident = "ASUS CUR-DLS",
-	 .matches = {
-		     DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
-		     DMI_MATCH(DMI_BOARD_NAME, "CUR-DLS"),
-		     },
-	 },
-	{
-	 .callback = force_acpi_ht,
-	 .ident = "ABIT i440BX-W83977",
-	 .matches = {
-		     DMI_MATCH(DMI_BOARD_VENDOR, "ABIT <http://www.abit.com>"),
-		     DMI_MATCH(DMI_BOARD_NAME, "i440BX-W83977 (BP6)"),
-		     },
-	 },
-	{
-	 .callback = force_acpi_ht,
-	 .ident = "IBM Bladecenter",
-	 .matches = {
-		     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
-		     DMI_MATCH(DMI_BOARD_NAME, "IBM eServer BladeCenter HS20"),
-		     },
-	 },
-	{
-	 .callback = force_acpi_ht,
-	 .ident = "IBM eServer xSeries 360",
-	 .matches = {
-		     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
-		     DMI_MATCH(DMI_BOARD_NAME, "eServer xSeries 360"),
-		     },
-	 },
-	{
-	 .callback = force_acpi_ht,
-	 .ident = "IBM eserver xSeries 330",
-	 .matches = {
-		     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
-		     DMI_MATCH(DMI_BOARD_NAME, "eserver xSeries 330"),
-		     },
-	 },
-	{
-	 .callback = force_acpi_ht,
-	 .ident = "IBM eserver xSeries 440",
-	 .matches = {
-		     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
-		     DMI_MATCH(DMI_PRODUCT_NAME, "eserver xSeries 440"),
-		     },
-	 },
-
-	/*
 	 * Boxes that need ACPI PCI IRQ routing disabled
 	 */
 	{
@@ -1652,8 +1562,10 @@ static int __init parse_acpi(char *arg)
 	}
 	/* Limit ACPI just to boot-time to enable HT */
 	else if (strcmp(arg, "ht") == 0) {
-		if (!acpi_force)
+		if (!acpi_force) {
+			printk(KERN_WARNING "acpi=ht will be removed in Linux-2.6.35\n");
 			disable_acpi();
+		}
 		acpi_ht = 1;
 	}
 	/* acpi=rsdt use RSDT instead of XSDT */
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index f147a95fd84a..3704997e8b25 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -31,7 +31,6 @@
 #include <asm/x86_init.h>
 
 int gart_iommu_aperture;
-EXPORT_SYMBOL_GPL(gart_iommu_aperture);
 int gart_iommu_aperture_disabled __initdata;
 int gart_iommu_aperture_allowed __initdata;
 
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index e3c3d820c325..09d3b17ce0c2 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -223,7 +223,7 @@ struct apic apic_flat = {
 };
 
 /*
- * Physflat mode is used when there are more than 8 CPUs on a AMD system.
+ * Physflat mode is used when there are more than 8 CPUs on a system.
  * We cannot use logical delivery in this case because the mask
  * overflows, so use physical mode.
  */
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 3740c8a4eae7..49dbeaef2a27 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -120,11 +120,9 @@ EXPORT_SYMBOL_GPL(uv_possible_blades);
 unsigned long sn_rtc_cycles_per_second;
 EXPORT_SYMBOL(sn_rtc_cycles_per_second);
 
-/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
-
 static const struct cpumask *uv_target_cpus(void)
 {
-	return cpumask_of(0);
+	return cpu_online_mask;
 }
 
 static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 879666f4d871..7e1cca13af35 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -70,7 +70,8 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 	if (c->x86_power & (1 << 8)) {
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
-		sched_clock_stable = 1;
+		if (!check_tsc_unstable())
+			sched_clock_stable = 1;
 	}
 
 	/*
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index eddb1bdd1b8f..b3eeb66c0a51 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -903,7 +903,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
 	return ret;
 }
 
-static struct sysfs_ops sysfs_ops = {
+static const struct sysfs_ops sysfs_ops = {
 	.show   = show,
 	.store  = store,
 };
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index a8aacd4b513c..3ab9c886b613 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -46,6 +46,13 @@
 
 #include "mce-internal.h"
 
+static DEFINE_MUTEX(mce_read_mutex);
+
+#define rcu_dereference_check_mce(p) \
+	rcu_dereference_check((p), \
+			      rcu_read_lock_sched_held() || \
+			      lockdep_is_held(&mce_read_mutex))
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/mce.h>
 
@@ -158,7 +165,7 @@ void mce_log(struct mce *mce)
 	mce->finished = 0;
 	wmb();
 	for (;;) {
-		entry = rcu_dereference(mcelog.next);
+		entry = rcu_dereference_check_mce(mcelog.next);
 		for (;;) {
 			/*
 			 * When the buffer fills up discard new entries.
@@ -1485,8 +1492,6 @@ static void collect_tscs(void *data)
 	rdtscll(cpu_tsc[smp_processor_id()]);
 }
 
-static DEFINE_MUTEX(mce_read_mutex);
-
 static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
 			loff_t *off)
 {
@@ -1500,7 +1505,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
 		return -ENOMEM;
 
 	mutex_lock(&mce_read_mutex);
-	next = rcu_dereference(mcelog.next);
+	next = rcu_dereference_check_mce(mcelog.next);
 
 	/* Only supports full reads right now */
 	if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) {
@@ -1565,7 +1570,7 @@ timeout:
 static unsigned int mce_poll(struct file *file, poll_table *wait)
 {
 	poll_wait(file, &mce_wait, wait);
-	if (rcu_dereference(mcelog.next))
+	if (rcu_dereference_check_mce(mcelog.next))
 		return POLLIN | POLLRDNORM;
 	return 0;
 }
@@ -2044,6 +2049,7 @@ static __init void mce_init_banks(void)
 		struct mce_bank *b = &mce_banks[i];
 		struct sysdev_attribute *a = &b->attr;
 
+		sysfs_attr_init(&a->attr);
 		a->attr.name	= b->attrname;
 		snprintf(b->attrname, ATTR_LEN, "bank%d", i);
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 83a3d1f4efca..cda932ca3ade 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -388,7 +388,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
 	return ret;
 }
 
-static struct sysfs_ops threshold_ops = {
+static const struct sysfs_ops threshold_ops = {
 	.show			= show,
 	.store			= store,
 };
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 7c785634af2b..d15df6e49bf0 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -95,7 +95,7 @@ static void cmci_discover(int banks, int boot)
 
 		/* Already owned by someone else? */
 		if (val & CMCI_EN) {
-			if (test_and_clear_bit(i, owned) || boot)
+			if (test_and_clear_bit(i, owned) && !boot)
 				print_update("SHD", &hdr, i);
 			__clear_bit(i, __get_cpu_var(mce_poll_banks));
 			continue;
@@ -107,7 +107,7 @@ static void cmci_discover(int banks, int boot)
 
 		/* Did the enable bit stick? -- the bank supports CMCI */
 		if (val & CMCI_EN) {
-			if (!test_and_set_bit(i, owned) || boot)
+			if (!test_and_set_bit(i, owned) && !boot)
 				print_update("CMCI", &hdr, i);
 			__clear_bit(i, __get_cpu_var(mce_poll_banks));
 		} else {
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index b1fbdeecf6c9..42aafd11e170 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -73,10 +73,10 @@ struct debug_store {
 struct event_constraint {
 	union {
 		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
-		u64		idxmsk64[1];
+		u64		idxmsk64;
 	};
-	int	code;
-	int	cmask;
+	u64	code;
+	u64	cmask;
 	int	weight;
 };
 
@@ -103,7 +103,7 @@ struct cpu_hw_events {
 };
 
 #define __EVENT_CONSTRAINT(c, n, m, w) {\
-	{ .idxmsk64[0] = (n) },		\
+	{ .idxmsk64 = (n) },		\
 	.code = (c),			\
 	.cmask = (m),			\
 	.weight = (w),			\
@@ -116,7 +116,7 @@ struct cpu_hw_events {
 	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK)
 
 #define FIXED_EVENT_CONSTRAINT(c, n)	\
-	EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK)
+	EVENT_CONSTRAINT(c, (1ULL << (32+n)), INTEL_ARCH_FIXED_MASK)
 
 #define EVENT_CONSTRAINT_END		\
 	EVENT_CONSTRAINT(0, 0, 0)
@@ -503,6 +503,9 @@ static int __hw_perf_event_init(struct perf_event *event)
 	 */
 	if (attr->type == PERF_TYPE_RAW) {
 		hwc->config |= x86_pmu.raw_event(attr->config);
+		if ((hwc->config & ARCH_PERFMON_EVENTSEL_ANY) &&
+		    perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
+			return -EACCES;
 		return 0;
 	}
 
@@ -553,9 +556,9 @@ static void x86_pmu_disable_all(void)
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;
 		rdmsrl(x86_pmu.eventsel + idx, val);
-		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
+		if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
 			continue;
-		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
 		wrmsrl(x86_pmu.eventsel + idx, val);
 	}
 }
@@ -590,7 +593,7 @@ static void x86_pmu_enable_all(void)
 			continue;
 
 		val = event->hw.config;
-		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 		wrmsrl(x86_pmu.eventsel + idx, val);
 	}
 }
@@ -612,8 +615,8 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 	bitmap_zero(used_mask, X86_PMC_IDX_MAX);
 
 	for (i = 0; i < n; i++) {
-		constraints[i] =
-			x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
+		c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
+		constraints[i] = c;
 	}
 
 	/*
@@ -853,7 +856,7 @@ void hw_perf_enable(void)
 static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
 	(void)checking_wrmsrl(hwc->config_base + idx,
-			      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
+			      hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE);
 }
 
 static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
@@ -1094,8 +1097,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
 	int idx, handled = 0;
 	u64 val;
 
-	data.addr = 0;
-	data.raw = NULL;
+	perf_sample_data_init(&data, 0);
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -1347,6 +1349,7 @@ static void __init pmu_check_apic(void)
 
 void __init init_hw_perf_events(void)
 {
+	struct event_constraint *c;
 	int err;
 
 	pr_info("Performance Events: ");
@@ -1395,6 +1398,16 @@ void __init init_hw_perf_events(void)
 		__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1,
 				   0, x86_pmu.num_events);
 
+	if (x86_pmu.event_constraints) {
+		for_each_event_constraint(c, x86_pmu.event_constraints) {
+			if (c->cmask != INTEL_ARCH_FIXED_MASK)
+				continue;
+
+			c->idxmsk64 |= (1ULL << x86_pmu.num_events) - 1;
+			c->weight += x86_pmu.num_events;
+		}
+	}
+
 	pr_info("... version:                %d\n",     x86_pmu.version);
 	pr_info("... bit width:              %d\n",     x86_pmu.event_bits);
 	pr_info("... generic registers:      %d\n",     x86_pmu.num_events);
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 977e7544738c..44b60c852107 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1,7 +1,7 @@
 #ifdef CONFIG_CPU_SUP_INTEL
 
 /*
- * Intel PerfMon v3. Used on Core2 and later.
+ * Intel PerfMon, used on Core and later.
  */
 static const u64 intel_perfmon_event_map[] =
 {
@@ -27,8 +27,14 @@ static struct event_constraint intel_core_event_constraints[] =
 
 static struct event_constraint intel_core2_event_constraints[] =
 {
-	FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
-	FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
+	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+	/*
+	 * Core2 has Fixed Counter 2 listed as CPU_CLK_UNHALTED.REF and event
+	 * 0x013c as CPU_CLK_UNHALTED.BUS and specifies there is a fixed
+	 * ratio between these counters.
+	 */
+	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
 	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
 	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
 	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
@@ -37,14 +43,16 @@ static struct event_constraint intel_core2_event_constraints[] =
 	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
 	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
 	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
+	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
 	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
 	EVENT_CONSTRAINT_END
 };
 
 static struct event_constraint intel_nehalem_event_constraints[] =
 {
-	FIXED_EVENT_CONSTRAINT(0xc0, (0xf|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
-	FIXED_EVENT_CONSTRAINT(0x3c, (0xf|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
+	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
 	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
 	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
 	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
@@ -58,8 +66,9 @@ static struct event_constraint intel_nehalem_event_constraints[] =
 
 static struct event_constraint intel_westmere_event_constraints[] =
 {
-	FIXED_EVENT_CONSTRAINT(0xc0, (0xf|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
-	FIXED_EVENT_CONSTRAINT(0x3c, (0xf|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
+	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
 	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
 	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
 	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
@@ -68,8 +77,9 @@ static struct event_constraint intel_westmere_event_constraints[] =
 
 static struct event_constraint intel_gen_event_constraints[] =
 {
-	FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
-	FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
+	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
 	EVENT_CONSTRAINT_END
 };
 
@@ -580,10 +590,9 @@ static void intel_pmu_drain_bts_buffer(void)
 
 	ds->bts_index = ds->bts_buffer_base;
 
+	perf_sample_data_init(&data, 0);
 
 	data.period = event->hw.last_period;
-	data.addr = 0;
-	data.raw = NULL;
 	regs.ip = 0;
 
 	/*
@@ -732,8 +741,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 	int bit, loops;
 	u64 ack, status;
 
-	data.addr = 0;
-	data.raw = NULL;
+	perf_sample_data_init(&data, 0);
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -935,7 +943,7 @@ static __init int intel_pmu_init(void)
 		x86_pmu.event_constraints = intel_nehalem_event_constraints;
 		pr_cont("Nehalem/Corei7 events, ");
 		break;
-	case 28:
+	case 28: /* Atom */
 		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
 
@@ -951,6 +959,7 @@ static __init int intel_pmu_init(void)
 		x86_pmu.event_constraints = intel_westmere_event_constraints;
 		pr_cont("Westmere events, ");
 		break;
+
 	default:
 		/*
 		 * default constraints for v2 and up
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index 1ca5ba078afd..a4e67b99d91c 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -62,7 +62,7 @@ static void p6_pmu_disable_all(void)
 
 	/* p6 only has one enable register */
 	rdmsrl(MSR_P6_EVNTSEL0, val);
-	val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+	val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
 	wrmsrl(MSR_P6_EVNTSEL0, val);
 }
 
@@ -72,7 +72,7 @@ static void p6_pmu_enable_all(void)
 
 	/* p6 only has one enable register */
 	rdmsrl(MSR_P6_EVNTSEL0, val);
-	val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+	val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 	wrmsrl(MSR_P6_EVNTSEL0, val);
 }
 
@@ -83,7 +83,7 @@ p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
 	u64 val = P6_NOP_EVENT;
 
 	if (cpuc->enabled)
-		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 
 	(void)checking_wrmsrl(hwc->config_base + idx, val);
 }
@@ -95,7 +95,7 @@ static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
 
 	val = hwc->config;
 	if (cpuc->enabled)
-		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 
 	(void)checking_wrmsrl(hwc->config_base + idx, val);
 }
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 74f4e85a5727..fb329e9f8494 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -680,7 +680,7 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
 	cpu_nmi_set_wd_enabled();
 
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
-	evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+	evntsel |= ARCH_PERFMON_EVENTSEL_ENABLE;
 	wrmsr(evntsel_msr, evntsel, 0);
 	intel_arch_wd_ops.checkbit = 1ULL << (eax.split.bit_width - 1);
 	return 1;
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index dce99abb4496..d5e2a2ebb627 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -120,9 +120,15 @@ fixup_bp_irq_link(unsigned long bp, unsigned long *stack,
 {
 #ifdef CONFIG_FRAME_POINTER
 	struct stack_frame *frame = (struct stack_frame *)bp;
+	unsigned long next;
 
-	if (!in_irq_stack(stack, irq_stack, irq_stack_end))
-		return (unsigned long)frame->next_frame;
+	if (!in_irq_stack(stack, irq_stack, irq_stack_end)) {
+		if (!probe_kernel_address(&frame->next_frame, next))
+			return next;
+		else
+			WARN_ONCE(1, "Perf: bad frame pointer = %p in "
+				  "callchain\n", &frame->next_frame);
+	}
 #endif
 	return bp;
 }
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 2d8b5035371c..3d1e6f16b7a6 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -27,7 +27,7 @@
 #define GET_CR2_INTO_RCX movq %cr2, %rcx
 #endif
 
-/* we are not able to switch in one step to the final KERNEL ADRESS SPACE
+/* we are not able to switch in one step to the final KERNEL ADDRESS SPACE
  * because we need identity-mapped pages.
  *
  */
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index dca2802c666f..d6cc065f519f 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -344,13 +344,6 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp,
 	}
 
 	/*
-	 * For kernel-addresses, either the address or symbol name can be
-	 * specified.
-	 */
-	if (info->name)
-		info->address = (unsigned long)
-			kallsyms_lookup_name(info->name);
-	/*
 	 * Check that the low-order bits of the address are appropriate
 	 * for the alignment implied by len.
 	 */
@@ -535,8 +528,3 @@ void hw_breakpoint_pmu_read(struct perf_event *bp)
 {
 	/* TODO */
 }
-
-void hw_breakpoint_pmu_unthrottle(struct perf_event *bp)
-{
-	/* TODO */
-}
diff --git a/arch/x86/kernel/k8.c b/arch/x86/kernel/k8.c
index cbc4332a77b2..9b895464dd03 100644
--- a/arch/x86/kernel/k8.c
+++ b/arch/x86/kernel/k8.c
@@ -121,3 +121,17 @@ void k8_flush_garts(void)
 }
 EXPORT_SYMBOL_GPL(k8_flush_garts);
 
+static __init int init_k8_nbs(void)
+{
+	int err = 0;
+
+	err = cache_k8_northbridges();
+
+	if (err < 0)
+		printk(KERN_NOTICE "K8 NB: Cannot enumerate AMD northbridges.\n");
+
+	return err;
+}
+
+/* This has to go after the PCI subsystem */
+fs_initcall(init_k8_nbs);
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 2bbde6078143..fb99f7edb341 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -1309,7 +1309,7 @@ static void calgary_init_bitmap_from_tce_table(struct iommu_table *tbl)
 /*
  * get_tce_space_from_tar():
  * Function for kdump case. Get the tce tables from first kernel
- * by reading the contents of the base adress register of calgary iommu
+ * by reading the contents of the base address register of calgary iommu
  */
 static void __init get_tce_space_from_tar(void)
 {
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 1aa966c565f9..a4ac764a6880 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -38,7 +38,7 @@ int iommu_detected __read_mostly = 0;
  * This variable becomes 1 if iommu=pt is passed on the kernel command line.
  * If this variable is 1, IOMMU implementations do no DMA translation for
  * devices and allow every device to access to whole physical memory. This is
- * useful if a user want to use an IOMMU only for KVM device assignment to
+ * useful if a user wants to use an IOMMU only for KVM device assignment to
  * guests and not for driver dma translation.
  */
 int iommu_pass_through __read_mostly;
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 34de53b46f87..f3af115a573a 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -735,7 +735,7 @@ int __init gart_iommu_init(void)
 	unsigned long scratch;
 	long i;
 
-	if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0)
+	if (num_k8_northbridges == 0)
 		return 0;
 
 #ifndef CONFIG_AGP_AMD64
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 02d678065d7d..ad9540676fcc 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -607,7 +607,7 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
 	if (pm_idle == poll_idle && smp_num_siblings > 1) {
-		printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
+		printk_once(KERN_WARNING "WARNING: polling idle and HT enabled,"
 			" performance may degrade.\n");
 	}
 #endif
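
printk_once() keeps this warning from being repeated for every CPU that selects the idle routine. It is, roughly, a printk() guarded by a function-local static flag (paraphrased from include/linux/kernel.h):

#define printk_once(x...) ({			\
	static bool __print_once;		\
						\
	if (!__print_once) {			\
		__print_once = true;		\
		printk(x);			\
	}					\
})
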
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 2d96aab82a48..a503b1fd04e5 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -581,7 +581,7 @@ ptrace_modify_breakpoint(struct perf_event *bp, int len, int type,
 	struct perf_event_attr attr;
 
 	/*
-	 * We shoud have at least an inactive breakpoint at this
+	 * We should have at least an inactive breakpoint at this
 	 * slot. It means the user is writing dr7 without having
 	 * written the address register first
 	 */
diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
index dee1ff7cba58..196552bb412c 100644
--- a/arch/x86/kernel/sys_i386_32.c
+++ b/arch/x86/kernel/sys_i386_32.c
@@ -25,191 +25,6 @@
 #include <asm/syscalls.h>
 
 /*
- * Perform the select(nd, in, out, ex, tv) and mmap() system
- * calls. Linux/i386 didn't use to be able to handle more than
- * 4 system call parameters, so these system calls used a memory
- * block for parameter passing..
- */
-
-struct mmap_arg_struct {
-	unsigned long addr;
-	unsigned long len;
-	unsigned long prot;
-	unsigned long flags;
-	unsigned long fd;
-	unsigned long offset;
-};
-
-asmlinkage int old_mmap(struct mmap_arg_struct __user *arg)
-{
-	struct mmap_arg_struct a;
-	int err = -EFAULT;
-
-	if (copy_from_user(&a, arg, sizeof(a)))
-		goto out;
-
-	err = -EINVAL;
-	if (a.offset & ~PAGE_MASK)
-		goto out;
-
-	err = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags,
-			a.fd, a.offset >> PAGE_SHIFT);
-out:
-	return err;
-}
-
-
-struct sel_arg_struct {
-	unsigned long n;
-	fd_set __user *inp, *outp, *exp;
-	struct timeval __user *tvp;
-};
-
-asmlinkage int old_select(struct sel_arg_struct __user *arg)
-{
-	struct sel_arg_struct a;
-
-	if (copy_from_user(&a, arg, sizeof(a)))
-		return -EFAULT;
-	/* sys_select() does the appropriate kernel locking */
-	return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
-}
-
-/*
- * sys_ipc() is the de-multiplexer for the SysV IPC calls..
- *
- * This is really horribly ugly.
- */
-asmlinkage int sys_ipc(uint call, int first, int second,
-		int third, void __user *ptr, long fifth)
-{
-	int version, ret;
-
-	version = call >> 16; /* hack for backward compatibility */
-	call &= 0xffff;
-
-	switch (call) {
-	case SEMOP:
-		return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
-	case SEMTIMEDOP:
-		return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
-					(const struct timespec __user *)fifth);
-
-	case SEMGET:
-		return sys_semget(first, second, third);
-	case SEMCTL: {
-		union semun fourth;
-		if (!ptr)
-			return -EINVAL;
-		if (get_user(fourth.__pad, (void __user * __user *) ptr))
-			return -EFAULT;
-		return sys_semctl(first, second, third, fourth);
-	}
-
-	case MSGSND:
-		return sys_msgsnd(first, (struct msgbuf __user *) ptr,
-				  second, third);
-	case MSGRCV:
-		switch (version) {
-		case 0: {
-			struct ipc_kludge tmp;
-			if (!ptr)
-				return -EINVAL;
-
-			if (copy_from_user(&tmp,
-					   (struct ipc_kludge __user *) ptr,
-					   sizeof(tmp)))
-				return -EFAULT;
-			return sys_msgrcv(first, tmp.msgp, second,
-					  tmp.msgtyp, third);
-		}
-		default:
-			return sys_msgrcv(first,
-					  (struct msgbuf __user *) ptr,
-					  second, fifth, third);
-		}
-	case MSGGET:
-		return sys_msgget((key_t) first, second);
-	case MSGCTL:
-		return sys_msgctl(first, second, (struct msqid_ds __user *) ptr);
-
-	case SHMAT:
-		switch (version) {
-		default: {
-			ulong raddr;
-			ret = do_shmat(first, (char __user *) ptr, second, &raddr);
-			if (ret)
-				return ret;
-			return put_user(raddr, (ulong __user *) third);
-		}
-		case 1:	/* iBCS2 emulator entry point */
-			if (!segment_eq(get_fs(), get_ds()))
-				return -EINVAL;
-			/* The "(ulong *) third" is valid _only_ because of the kernel segment thing */
-			return do_shmat(first, (char __user *) ptr, second, (ulong *) third);
-		}
-	case SHMDT:
-		return sys_shmdt((char __user *)ptr);
-	case SHMGET:
-		return sys_shmget(first, second, third);
-	case SHMCTL:
-		return sys_shmctl(first, second,
-				  (struct shmid_ds __user *) ptr);
-	default:
-		return -ENOSYS;
-	}
-}
-
-/*
- * Old cruft
- */
-asmlinkage int sys_uname(struct old_utsname __user *name)
-{
-	int err;
-	if (!name)
-		return -EFAULT;
-	down_read(&uts_sem);
-	err = copy_to_user(name, utsname(), sizeof(*name));
-	up_read(&uts_sem);
-	return err? -EFAULT:0;
-}
-
-asmlinkage int sys_olduname(struct oldold_utsname __user *name)
-{
-	int error;
-
-	if (!name)
-		return -EFAULT;
-	if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
-		return -EFAULT;
-
-	down_read(&uts_sem);
-
-	error = __copy_to_user(&name->sysname, &utsname()->sysname,
-			       __OLD_UTS_LEN);
-	error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
-	error |= __copy_to_user(&name->nodename, &utsname()->nodename,
-				__OLD_UTS_LEN);
-	error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
-	error |= __copy_to_user(&name->release, &utsname()->release,
-				__OLD_UTS_LEN);
-	error |= __put_user(0, name->release + __OLD_UTS_LEN);
-	error |= __copy_to_user(&name->version, &utsname()->version,
-				__OLD_UTS_LEN);
-	error |= __put_user(0, name->version + __OLD_UTS_LEN);
-	error |= __copy_to_user(&name->machine, &utsname()->machine,
-				__OLD_UTS_LEN);
-	error |= __put_user(0, name->machine + __OLD_UTS_LEN);
-
-	up_read(&uts_sem);
-
-	error = error ? -EFAULT : 0;
-
-	return error;
-}
-
-
-/*
  * Do a system call from kernel instead of calling sys_execve so we
  * end up with proper pt_regs.
  */
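
These wrappers were not dropped outright: the 2.6.34-era consolidation moved them into common code behind per-arch __ARCH_WANT_* macros (__ARCH_WANT_SYS_OLD_MMAP, __ARCH_WANT_SYS_OLD_SELECT, __ARCH_WANT_SYS_IPC, and the old uname variants), so every architecture shares one copy. Roughly what the generic sys_old_mmap() in mm/mmap.c looks like (paraphrased, not part of this hunk):

SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
{
	struct mmap_arg_struct a;

	/* Same six-word argument block, copied in from user space. */
	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	if (a.offset & ~PAGE_MASK)
		return -EINVAL;

	return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
			      a.offset >> PAGE_SHIFT);
}
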
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 8aa2057efd12..ff14a5044ce6 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -209,15 +209,3 @@ bottomup:
 
 	return addr;
 }
-
-
-SYSCALL_DEFINE1(uname, struct new_utsname __user *, name)
-{
-	int err;
-	down_read(&uts_sem);
-	err = copy_to_user(name, utsname(), sizeof(*name));
-	up_read(&uts_sem);
-	if (personality(current->personality) == PER_LINUX32)
-		err |= copy_to_user(&name->machine, "i686", 5);
-	return err ? -EFAULT : 0;
-}
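
The PER_LINUX32 special case survives in the generic uname path in kernel/sys.c, which folds it into a helper so compat architectures get it for free (paraphrased from the same consolidation):

#ifdef COMPAT_UTS_MACHINE
#define override_architecture(name) \
	(personality(current->personality) == PER_LINUX32 && \
	 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
		      sizeof(COMPAT_UTS_MACHINE)))
#else
#define override_architecture(name)	0
#endif
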
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
index 15228b5d3eb7..8b3729341216 100644
--- a/arch/x86/kernel/syscall_table_32.S
+++ b/arch/x86/kernel/syscall_table_32.S
@@ -81,7 +81,7 @@ ENTRY(sys_call_table)
 	.long sys_settimeofday
 	.long sys_getgroups16	/* 80 */
 	.long sys_setgroups16
-	.long old_select
+	.long sys_old_select
 	.long sys_symlink
 	.long sys_lstat
 	.long sys_readlink	/* 85 */
@@ -89,7 +89,7 @@ ENTRY(sys_call_table)
 	.long sys_swapon
 	.long sys_reboot
 	.long sys_old_readdir
-	.long old_mmap		/* 90 */
+	.long sys_old_mmap	/* 90 */
 	.long sys_munmap
 	.long sys_truncate
 	.long sys_ftruncate
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 208a857c679f..9faf91ae1841 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -50,7 +50,7 @@ u64 native_sched_clock(void)
 	 * unstable. We do this because unlike Time Of Day,
 	 * the scheduler clock tolerates small errors and it's
 	 * very important for it to be as fast as the platform
-	 * can achive it. )
+	 * can achieve it. )
 	 */
 	if (unlikely(tsc_disabled)) {
 		/* No locking but a rare wrong value is not a big deal: */
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index 2f1ca5614292..5e1ff66ecd73 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -167,7 +167,7 @@ static int vmi_timer_next_event(unsigned long delta,
 {
 	/* Unfortunately, set_next_event interface only passes relative
 	 * expiry, but we want absolute expiry.  It'd be better if were
-	 * were passed an aboslute expiry, since a bunch of time may
+	 * were passed an absolute expiry, since a bunch of time may
 	 * have been stolen between the time the delta is computed and
 	 * when we set the alarm below. */
 	cycle_t now = vmi_timer_ops.get_cycle_counter(vmi_counter(VMI_ONESHOT));
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 1d4eb93d333c..cf07c26d9a4a 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -291,8 +291,29 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
 	 */
 	if (kernel_set_to_readonly &&
 	    within(address, (unsigned long)_text,
-		   (unsigned long)__end_rodata_hpage_align))
-		pgprot_val(forbidden) |= _PAGE_RW;
+		   (unsigned long)__end_rodata_hpage_align)) {
+		unsigned int level;
+
+		/*
+		 * Don't enforce the !RW mapping for the kernel text mapping
+		 * if the current mapping already uses small pages. There is
+		 * no need to work hard to preserve large page mappings in
+		 * this case.
+		 *
+		 * This also fixes the Linux Xen paravirt guest boot failure
+		 * caused by unexpected read-only mappings for kernel identity
+		 * mappings. In the paravirt guest case, the kernel text
+		 * mapping and the kernel identity mapping share the same
+		 * page-table pages, so we can't use different protections
+		 * for the two. Those shared mappings are also made of small
+		 * page mappings, so not enforcing the !RW mapping for
+		 * small-page kernel text lets Xen paravirt guests boot as
+		 * well.
+		 */
+		if (lookup_address(address, &level) && (level != PG_LEVEL_4K))
+			pgprot_val(forbidden) |= _PAGE_RW;
+	}
 #endif
 
 	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
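
The fix keys entirely off the mapping level reported by lookup_address(), which returns the pte for a kernel virtual address and tells the caller whether that address is mapped with 4K, 2M, or 1G pages. A minimal illustration of that contract (not code from the patch):

static bool addr_uses_large_pages(unsigned long addr)
{
	unsigned int level;
	pte_t *pte = lookup_address(addr, &level);

	/* level is PG_LEVEL_4K, PG_LEVEL_2M, or PG_LEVEL_1G */
	return pte && level != PG_LEVEL_4K;
}
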
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 6a58256dce9f..090cbbec7dbd 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -46,17 +46,6 @@
 
 static unsigned long reset_value[NUM_VIRT_COUNTERS];
 
-/* IbsFetchCtl bits/masks */
-#define IBS_FETCH_RAND_EN		(1ULL<<57)
-#define IBS_FETCH_VAL			(1ULL<<49)
-#define IBS_FETCH_ENABLE		(1ULL<<48)
-#define IBS_FETCH_CNT_MASK		0xFFFF0000ULL
-
-/* IbsOpCtl bits */
-#define IBS_OP_CNT_CTL			(1ULL<<19)
-#define IBS_OP_VAL			(1ULL<<18)
-#define IBS_OP_ENABLE			(1ULL<<17)
-
 #define IBS_FETCH_SIZE			6
 #define IBS_OP_SIZE			12
 
@@ -182,7 +171,7 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
 			continue;
 		}
 		rdmsrl(msrs->controls[i].addr, val);
-		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
+		if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
 			op_x86_warn_in_use(i);
 		val &= model->reserved;
 		wrmsrl(msrs->controls[i].addr, val);
@@ -290,7 +279,7 @@ op_amd_handle_ibs(struct pt_regs * const regs,
 		oprofile_write_commit(&entry);
 
 		/* reenable the IRQ */
-		ctl &= ~(IBS_FETCH_VAL | IBS_FETCH_CNT_MASK);
+		ctl &= ~(IBS_FETCH_VAL | IBS_FETCH_CNT);
 		ctl |= IBS_FETCH_ENABLE;
 		wrmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
 	}
@@ -330,7 +319,7 @@ static inline void op_amd_start_ibs(void)
 		return;
 
 	if (ibs_config.fetch_enabled) {
-		val = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF;
+		val = (ibs_config.max_cnt_fetch >> 4) & IBS_FETCH_MAX_CNT;
 		val |= ibs_config.rand_en ? IBS_FETCH_RAND_EN : 0;
 		val |= IBS_FETCH_ENABLE;
 		wrmsrl(MSR_AMD64_IBSFETCHCTL, val);
@@ -352,7 +341,7 @@ static inline void op_amd_start_ibs(void)
 		 * avoid underflows.
 		 */
 		ibs_op_ctl = min(ibs_op_ctl + IBS_RANDOM_MAXCNT_OFFSET,
-				 0xFFFFULL);
+				 IBS_OP_MAX_CNT);
 	}
 	if (ibs_caps & IBS_CAPS_OPCNT && ibs_config.dispatched_ops)
 		ibs_op_ctl |= IBS_OP_CNT_CTL;
@@ -409,7 +398,7 @@ static void op_amd_start(struct op_msrs const * const msrs)
 		if (!reset_value[op_x86_phys_to_virt(i)])
 			continue;
 		rdmsrl(msrs->controls[i].addr, val);
-		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 		wrmsrl(msrs->controls[i].addr, val);
 	}
 
@@ -429,7 +418,7 @@ static void op_amd_stop(struct op_msrs const * const msrs)
 		if (!reset_value[op_x86_phys_to_virt(i)])
 			continue;
 		rdmsrl(msrs->controls[i].addr, val);
-		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
 		wrmsrl(msrs->controls[i].addr, val);
 	}
 
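
The deleted IBS constants were not lost; the new names this diff switches to imply shared definitions in <asm/perf_event.h>, roughly as below. This is reconstructed from the old local copies and the new identifiers, so treat it as a sketch rather than a quote of that header:

/* IbsFetchCtl bits/masks */
#define IBS_FETCH_RAND_EN	(1ULL<<57)
#define IBS_FETCH_VAL		(1ULL<<49)
#define IBS_FETCH_ENABLE	(1ULL<<48)
#define IBS_FETCH_CNT		0xFFFF0000ULL	/* was IBS_FETCH_CNT_MASK */
#define IBS_FETCH_MAX_CNT	0x0000FFFFULL	/* was the bare 0xFFFF */

/* IbsOpCtl bits */
#define IBS_OP_CNT_CTL		(1ULL<<19)
#define IBS_OP_VAL		(1ULL<<18)
#define IBS_OP_ENABLE		(1ULL<<17)
#define IBS_OP_MAX_CNT		0x0000FFFFULL	/* was the bare 0xFFFFULL */
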
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index 5d1727ba409e..2bf90fafa7b5 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -88,7 +88,7 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
 			continue;
 		}
 		rdmsrl(msrs->controls[i].addr, val);
-		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
+		if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
 			op_x86_warn_in_use(i);
 		val &= model->reserved;
 		wrmsrl(msrs->controls[i].addr, val);
@@ -166,7 +166,7 @@ static void ppro_start(struct op_msrs const * const msrs)
 	for (i = 0; i < num_counters; ++i) {
 		if (reset_value[i]) {
 			rdmsrl(msrs->controls[i].addr, val);
-			val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+			val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 			wrmsrl(msrs->controls[i].addr, val);
 		}
 	}
@@ -184,7 +184,7 @@ static void ppro_stop(struct op_msrs const * const msrs)
 		if (!reset_value[i])
 			continue;
 		rdmsrl(msrs->controls[i].addr, val);
-		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
 		wrmsrl(msrs->controls[i].addr, val);
 	}
 }
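
Both oprofile models now share ARCH_PERFMON_EVENTSEL_ENABLE (bit 22 of a counter's EVNTSEL MSR) with the perf code instead of carrying a private EVENTSEL0 copy. The start/stop idiom repeated in these hunks is a plain read-modify-write of that enable bit, sketched here for illustration (helper name is hypothetical):

static void counter_set_enabled(unsigned int msr, bool enable)
{
	u64 val;

	rdmsrl(msr, val);	/* read the counter's EVNTSEL MSR */
	if (enable)
		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
	else
		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
	wrmsrl(msr, val);	/* write it back */
}
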
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 563d20504988..deafb65ef44e 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -361,7 +361,7 @@ static void xen_cpu_die(unsigned int cpu)
 	alternatives_smp_switch(0);
 }
 
-static void __cpuinit xen_play_dead(void) /* used only with CPU_HOTPLUG */
+static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */
 {
 	play_dead_common();
 	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);