Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/Kconfig                             |    4
-rw-r--r--  arch/blackfin/Kconfig                        |    4
-rw-r--r--  arch/blackfin/configs/BF548-EZKIT_defconfig  |   13
-rw-r--r--  arch/blackfin/include/asm/Kbuild             |    1
-rw-r--r--  arch/blackfin/include/asm/atomic.h           |  104
-rw-r--r--  arch/blackfin/include/asm/mutex.h            |    1
-rw-r--r--  arch/blackfin/include/asm/uaccess.h          |    6
-rw-r--r--  arch/blackfin/kernel/Makefile                |    2
-rw-r--r--  arch/blackfin/kernel/kgdb_test.c             |    3
-rw-r--r--  arch/blackfin/kernel/time-ts.c               |    6
-rw-r--r--  arch/blackfin/kernel/time.c                  |    1
-rw-r--r--  arch/blackfin/mach-bf533/boards/H8606.c      |    3
-rw-r--r--  arch/blackfin/mach-bf537/boards/cm_bf537e.c  |    2
-rw-r--r--  arch/blackfin/mach-bf537/boards/cm_bf537u.c  |    2
-rw-r--r--  arch/blackfin/mach-bf537/boards/stamp.c      |   88
-rw-r--r--  arch/blackfin/mach-bf537/boards/tcm_bf537.c  |    2
-rw-r--r--  arch/blackfin/mach-bf561/boards/cm_bf561.c   |    2
-rw-r--r--  arch/blackfin/mach-bf561/smp.c               |    2
-rw-r--r--  arch/blackfin/mach-common/smp.c              |    7
-rw-r--r--  arch/m32r/Kconfig                            |    1
-rw-r--r--  arch/mn10300/Kconfig                         |    3
-rw-r--r--  arch/s390/kernel/time.c                      |   13
-rw-r--r--  arch/tile/Kconfig                            |    3
-rw-r--r--  arch/tile/configs/tilegx_defconfig           |    1
-rw-r--r--  arch/tile/configs/tilepro_defconfig          |    1
-rw-r--r--  arch/um/defconfig                            |    1
-rw-r--r--  arch/x86/Kconfig                             |    2
-rw-r--r--  arch/x86/include/asm/apic.h                  |    2
-rw-r--r--  arch/x86/include/asm/dwarf2.h                |    2
-rw-r--r--  arch/x86/include/asm/irq_vectors.h           |   12
-rw-r--r--  arch/x86/include/asm/nmi.h                   |   37
-rw-r--r--  arch/x86/include/asm/perf_event.h            |   55
-rw-r--r--  arch/x86/include/asm/reboot.h                |    2
-rw-r--r--  arch/x86/include/asm/unistd_64.h             |    1
-rw-r--r--  arch/x86/kernel/Makefile                     |    2
-rw-r--r--  arch/x86/kernel/apic/bigsmp_32.c             |   20
-rw-r--r--  arch/x86/kernel/apic/hw_nmi.c                |   27
-rw-r--r--  arch/x86/kernel/apic/io_apic.c               |  394
-rw-r--r--  arch/x86/kernel/apic/probe_32.c              |   10
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c           |   20
-rw-r--r--  arch/x86/kernel/cpu/Makefile                 |    7
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce-inject.c      |   20
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c             |   23
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c             |  442
-rw-r--r--  arch/x86/kernel/cpu/perf_event.h             |  505
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd.c         |   38
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd_ibs.c     |  294
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c       |  146
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_ds.c    |   79
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_lbr.c   |   28
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p4.c          |   10
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p6.c          |    9
-rw-r--r--  arch/x86/kernel/crash.c                      |    5
-rw-r--r--  arch/x86/kernel/entry_64.S                   |   14
-rw-r--r--  arch/x86/kernel/jump_label.c                 |    2
-rw-r--r--  arch/x86/kernel/kgdb.c                       |   60
-rw-r--r--  arch/x86/kernel/kprobes.c                    |    7
-rw-r--r--  arch/x86/kernel/nmi.c                        |  433
-rw-r--r--  arch/x86/kernel/process.c                    |    2
-rw-r--r--  arch/x86/kernel/process_32.c                 |    2
-rw-r--r--  arch/x86/kernel/process_64.c                 |    2
-rw-r--r--  arch/x86/kernel/reboot.c                     |   23
-rw-r--r--  arch/x86/kernel/traps.c                      |  155
-rw-r--r--  arch/x86/lib/insn.c                          |   48
-rw-r--r--  arch/x86/mm/fault.c                          |    8
-rw-r--r--  arch/x86/oprofile/nmi_int.c                  |   40
-rw-r--r--  arch/x86/oprofile/nmi_timer_int.c            |   28
-rw-r--r--  arch/x86/oprofile/op_model_amd.c             |  234
-rw-r--r--  arch/x86/oprofile/op_model_ppro.c            |   27
-rw-r--r--  arch/x86/oprofile/op_x86_model.h             |    1
-rw-r--r--  arch/xtensa/configs/iss_defconfig            |    1
-rw-r--r--  arch/xtensa/configs/s6105_defconfig          |    1
 72 files changed, 2023 insertions(+), 1533 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 795126ea4935..8090cad0dd52 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -346,7 +346,6 @@ config ARCH_GEMINI
 config ARCH_PRIMA2
 	bool "CSR SiRFSoC PRIMA2 ARM Cortex A9 Platform"
 	select CPU_V7
-	select GENERIC_TIME
 	select NO_IOPORT
 	select GENERIC_CLOCKEVENTS
 	select CLKDEV_LOOKUP
@@ -520,7 +519,6 @@ config ARCH_LPC32XX
 	select ARM_AMBA
 	select USB_ARCH_HAS_OHCI
 	select CLKDEV_LOOKUP
-	select GENERIC_TIME
 	select GENERIC_CLOCKEVENTS
 	help
 	  Support for the NXP LPC32XX family of processors
@@ -599,7 +597,6 @@ config ARCH_TEGRA
 	bool "NVIDIA Tegra"
 	select CLKDEV_LOOKUP
 	select CLKSRC_MMIO
-	select GENERIC_TIME
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_GPIO
 	select HAVE_CLK
@@ -914,7 +911,6 @@ config ARCH_VT8500
 config ARCH_ZYNQ
 	bool "Xilinx Zynq ARM Cortex A9 Platform"
 	select CPU_V7
-	select GENERIC_TIME
 	select GENERIC_CLOCKEVENTS
 	select CLKDEV_LOOKUP
 	select ARM_GIC
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index c7476295de80..abe5a9e85148 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -248,10 +248,6 @@ config HOTPLUG_CPU
 	depends on SMP && HOTPLUG
 	default y
 
-config HAVE_LEGACY_PER_CPU_AREA
-	def_bool y
-	depends on SMP
-
 config BF_REV_MIN
 	int
 	default 0 if (BF51x || BF52x || (BF54x && !BF54xM))
diff --git a/arch/blackfin/configs/BF548-EZKIT_defconfig b/arch/blackfin/configs/BF548-EZKIT_defconfig
index 56151b5dbc44..0e6d841b5d01 100644
--- a/arch/blackfin/configs/BF548-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF548-EZKIT_defconfig
@@ -4,7 +4,6 @@ CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
@@ -40,7 +39,6 @@ CONFIG_EBIU_MODEVAL=0x1
 CONFIG_EBIU_FCTLVAL=0x6
 CONFIG_BINFMT_FLAT=y
 CONFIG_BINFMT_ZFLAT=y
-CONFIG_PM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -55,7 +53,6 @@ CONFIG_IP_PNP=y
 CONFIG_CAN=m
 CONFIG_CAN_RAW=m
 CONFIG_CAN_BCM=m
-CONFIG_CAN_DEV=m
 CONFIG_CAN_BFIN=m
 CONFIG_IRDA=m
 CONFIG_IRLAN=m
@@ -67,7 +64,6 @@ CONFIG_BFIN_SIR3=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_FW_LOADER=m
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
@@ -105,12 +101,12 @@ CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_TOUCHSCREEN_AD7877=m
 CONFIG_INPUT_MISC=y
 # CONFIG_SERIO is not set
-# CONFIG_DEVKMEM is not set
+# CONFIG_LEGACY_PTYS is not set
 CONFIG_BFIN_JTAG_COMM=m
+# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_BFIN=y
 CONFIG_SERIAL_BFIN_CONSOLE=y
 CONFIG_SERIAL_BFIN_UART1=y
-# CONFIG_LEGACY_PTYS is not set
 # CONFIG_HW_RANDOM is not set
 CONFIG_I2C=y
 CONFIG_I2C_CHARDEV=y
@@ -163,6 +159,7 @@ CONFIG_USB_DEVICEFS=y
 CONFIG_USB_OTG_BLACKLIST_HUB=y
 CONFIG_USB_MON=y
 CONFIG_USB_MUSB_HDRC=y
+CONFIG_USB_MUSB_BLACKFIN=y
 CONFIG_USB_STORAGE=y
 CONFIG_MMC=y
 CONFIG_MMC_BLOCK=m
@@ -185,8 +182,6 @@ CONFIG_NFS_FS=m
 CONFIG_NFS_V3=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
 CONFIG_CIFS=y
 CONFIG_NLS_CODEPAGE_437=m
 CONFIG_NLS_CODEPAGE_936=m
@@ -196,7 +191,6 @@ CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_SHIRQ=y
 CONFIG_DETECT_HUNG_TASK=y
 CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
 # CONFIG_FTRACE is not set
 CONFIG_DEBUG_MMRS=y
 CONFIG_DEBUG_HWERR=y
@@ -206,5 +200,4 @@ CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y
 CONFIG_EARLY_PRINTK=y
 CONFIG_CPLB_INFO=y
 CONFIG_BFIN_PSEUDODBG_INSNS=y
-CONFIG_CRYPTO=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/blackfin/include/asm/Kbuild b/arch/blackfin/include/asm/Kbuild
index 7a075eaf6041..5a0625aad6a0 100644
--- a/arch/blackfin/include/asm/Kbuild
+++ b/arch/blackfin/include/asm/Kbuild
@@ -21,6 +21,7 @@ generic-y += local64.h
 generic-y += local.h
 generic-y += mman.h
 generic-y += msgbuf.h
+generic-y += mutex.h
 generic-y += param.h
 generic-y += percpu.h
 generic-y += pgalloc.h
diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h
index 135225696fd2..54c6e2887e9f 100644
--- a/arch/blackfin/include/asm/atomic.h
+++ b/arch/blackfin/include/asm/atomic.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2004-2009 Analog Devices Inc.
+ * Copyright 2004-2011 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later.
  */
@@ -7,111 +7,27 @@
 #ifndef __ARCH_BLACKFIN_ATOMIC__
 #define __ARCH_BLACKFIN_ATOMIC__
 
-#ifndef CONFIG_SMP
-# include <asm-generic/atomic.h>
-#else
+#ifdef CONFIG_SMP
 
-#include <linux/types.h>
-#include <asm/system.h>	/* local_irq_XXX() */
-
-/*
- * Atomic operations that C can't guarantee us.  Useful for
- * resource counting etc..
- */
-
-#define ATOMIC_INIT(i)	{ (i) }
-#define atomic_set(v, i)	(((v)->counter) = i)
-
-#define atomic_read(v)	__raw_uncached_fetch_asm(&(v)->counter)
+#include <linux/linkage.h>
 
 asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);
-
 asmlinkage int __raw_atomic_update_asm(volatile int *ptr, int value);
-
 asmlinkage int __raw_atomic_clear_asm(volatile int *ptr, int value);
-
 asmlinkage int __raw_atomic_set_asm(volatile int *ptr, int value);
-
 asmlinkage int __raw_atomic_xor_asm(volatile int *ptr, int value);
-
 asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value);
 
-static inline void atomic_add(int i, atomic_t *v)
-{
-	__raw_atomic_update_asm(&v->counter, i);
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
-	__raw_atomic_update_asm(&v->counter, -i);
-}
-
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-	return __raw_atomic_update_asm(&v->counter, i);
-}
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-	return __raw_atomic_update_asm(&v->counter, -i);
-}
+#define atomic_read(v) __raw_uncached_fetch_asm(&(v)->counter)
 
-static inline void atomic_inc(volatile atomic_t *v)
-{
-	__raw_atomic_update_asm(&v->counter, 1);
-}
-
-static inline void atomic_dec(volatile atomic_t *v)
-{
-	__raw_atomic_update_asm(&v->counter, -1);
-}
-
-static inline void atomic_clear_mask(int mask, atomic_t *v)
-{
-	__raw_atomic_clear_asm(&v->counter, mask);
-}
-
-static inline void atomic_set_mask(int mask, atomic_t *v)
-{
-	__raw_atomic_set_asm(&v->counter, mask);
-}
-
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec()	barrier()
-#define smp_mb__after_atomic_dec()	barrier()
-#define smp_mb__before_atomic_inc()	barrier()
-#define smp_mb__after_atomic_inc()	barrier()
-
-#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
-#define atomic_dec_return(v)	atomic_sub_return(1,(v))
-#define atomic_inc_return(v)	atomic_add_return(1,(v))
-
-#define atomic_cmpxchg(v, o, n)	((int)cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_xchg(v, new)	(xchg(&((v)->counter), new))
-
-#define __atomic_add_unless(v, a, u)				\
-({								\
-	int c, old;						\
-	c = atomic_read(v);					\
-	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
-		c = old;					\
-	c;							\
-})
-
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
-#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
-#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
-
+#define atomic_add_return(i, v) __raw_atomic_update_asm(&(v)->counter, i)
+#define atomic_sub_return(i, v) __raw_atomic_update_asm(&(v)->counter, -(i))
+
+#define atomic_clear_mask(m, v) __raw_atomic_clear_asm(&(v)->counter, m)
+#define atomic_set_mask(m, v) __raw_atomic_set_asm(&(v)->counter, m)
 
 #endif
 
+#include <asm-generic/atomic.h>
+
 #endif
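With the SMP case reduced to the __raw_* wrappers above, every other atomic_* helper on Blackfin now comes from <asm-generic/atomic.h>. A minimal sketch of what callers see (hypothetical driver code; the exact derivation depends on the generic header, which in this kernel generation builds helpers such as atomic_inc() and atomic_dec_and_test() on top of the architecture-supplied atomic_add_return()/atomic_sub_return()):

/* Sketch: callers are unchanged by the move to asm-generic/atomic.h;
 * only the place the helpers are defined moves.
 */
static atomic_t refcnt = ATOMIC_INIT(1);

static void get_ref(void)
{
	atomic_inc(&refcnt);		/* derived from atomic_add_return(1, v) */
}

static int put_ref(void)
{
	return atomic_dec_and_test(&refcnt);	/* atomic_sub_return(1, v) == 0 */
}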
diff --git a/arch/blackfin/include/asm/mutex.h b/arch/blackfin/include/asm/mutex.h
deleted file mode 100644
index ff6101aa2c71..000000000000
--- a/arch/blackfin/include/asm/mutex.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/mutex-dec.h>
diff --git a/arch/blackfin/include/asm/uaccess.h b/arch/blackfin/include/asm/uaccess.h
index 1c0d190adaef..5cc111502822 100644
--- a/arch/blackfin/include/asm/uaccess.h
+++ b/arch/blackfin/include/asm/uaccess.h
@@ -195,17 +195,17 @@ static inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	if (access_ok(VERIFY_READ, from, n))
-		memcpy(to, from, n);
+		memcpy(to, (const void __force *)from, n);
 	else
 		return n;
 	return 0;
 }
 
 static inline unsigned long __must_check
-copy_to_user(void *to, const void __user *from, unsigned long n)
+copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	if (access_ok(VERIFY_WRITE, to, n))
-		memcpy(to, from, n);
+		memcpy((void __force *)to, from, n);
 	else
 		return n;
 	return 0;
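The __force casts and the corrected copy_to_user() prototype are for sparse's address-space checking: __user pointers live in a separate address space, and passing them straight to memcpy() would otherwise warn. A small sketch of the convention the fixed prototypes enforce (my_state and my_ioctl_get are hypothetical, not part of this diff):

/* Sketch: "to" must be a __user pointer for copy_to_user(); a nonzero
 * return is the number of bytes left uncopied, conventionally turned
 * into -EFAULT by the caller.
 */
struct my_state { int value; };			/* hypothetical */

static long my_ioctl_get(struct my_state *s, void __user *argp)
{
	if (copy_to_user(argp, s, sizeof(*s)))
		return -EFAULT;
	return 0;
}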
diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile
index b7bdc42fe1a3..1f88edd4572a 100644
--- a/arch/blackfin/kernel/Makefile
+++ b/arch/blackfin/kernel/Makefile
@@ -38,6 +38,6 @@ obj-$(CONFIG_PERF_EVENTS) += perf_event.o
 
 # the kgdb test puts code into L2 and without linker
 # relaxation, we need to force long calls to/from it
-CFLAGS_kgdb_test.o := -mlong-calls -O0
+CFLAGS_kgdb_test.o := -mlong-calls
 
 obj-$(CONFIG_DEBUG_MMRS) += debug-mmrs.o
diff --git a/arch/blackfin/kernel/kgdb_test.c b/arch/blackfin/kernel/kgdb_test.c
index 2a6e9dbb62a5..4a7dcfea98af 100644
--- a/arch/blackfin/kernel/kgdb_test.c
+++ b/arch/blackfin/kernel/kgdb_test.c
@@ -50,8 +50,7 @@ void kgdb_l2_test(void)
 
 #endif
 
-
-int kgdb_test(char *name, int len, int count, int z)
+noinline int kgdb_test(char *name, int len, int count, int z)
 {
 	pr_alert("kgdb name(%d): %s, %d, %d\n", len, name, count, z);
 	count = z;
diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c
index 9e9b60d969dc..1bcf3a3c57d8 100644
--- a/arch/blackfin/kernel/time-ts.c
+++ b/arch/blackfin/kernel/time-ts.c
@@ -188,8 +188,7 @@ irqreturn_t bfin_gptmr0_interrupt(int irq, void *dev_id)
 
 static struct irqaction gptmr0_irq = {
 	.name		= "Blackfin GPTimer0",
-	.flags		= IRQF_DISABLED | IRQF_TIMER | \
-			  IRQF_IRQPOLL | IRQF_PERCPU,
+	.flags		= IRQF_TIMER | IRQF_IRQPOLL | IRQF_PERCPU,
 	.handler	= bfin_gptmr0_interrupt,
 };
 
@@ -297,8 +296,7 @@ irqreturn_t bfin_coretmr_interrupt(int irq, void *dev_id)
 
 static struct irqaction coretmr_irq = {
 	.name		= "Blackfin CoreTimer",
-	.flags		= IRQF_DISABLED | IRQF_TIMER | \
-			  IRQF_IRQPOLL | IRQF_PERCPU,
+	.flags		= IRQF_TIMER | IRQF_IRQPOLL | IRQF_PERCPU,
 	.handler	= bfin_coretmr_interrupt,
 };
 
diff --git a/arch/blackfin/kernel/time.c b/arch/blackfin/kernel/time.c
index ceb2bf63dfe2..2310b249675f 100644
--- a/arch/blackfin/kernel/time.c
+++ b/arch/blackfin/kernel/time.c
@@ -25,7 +25,6 @@
 
 static struct irqaction bfin_timer_irq = {
 	.name = "Blackfin Timer Tick",
-	.flags = IRQF_DISABLED
 };
 
 #if defined(CONFIG_IPIPE)
diff --git a/arch/blackfin/mach-bf533/boards/H8606.c b/arch/blackfin/mach-bf533/boards/H8606.c
index eb325ed6607e..5da5787fc4ef 100644
--- a/arch/blackfin/mach-bf533/boards/H8606.c
+++ b/arch/blackfin/mach-bf533/boards/H8606.c
@@ -54,7 +54,8 @@ static struct resource dm9000_resources[] = {
 	[2] = {
 		.start = IRQ_PF10,
 		.end = IRQ_PF10,
-		.flags = (IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE | IRQF_SHARED | IRQF_TRIGGER_HIGH),
+		.flags = (IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE |
+			  IORESOURCE_IRQ_SHAREABLE),
 	},
 };
 
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537e.c b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
index 44fd8409db10..9fb20d6d8f91 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537e.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
@@ -605,7 +605,7 @@ static struct platform_device bfin_mac_device = {
 
 static struct pata_platform_info bfin_pata_platform_data = {
 	.ioport_shift = 2,
-	.irq_type = IRQF_TRIGGER_HIGH | IRQF_DISABLED,
+	.irq_type = IRQF_TRIGGER_HIGH,
 };
 
 static struct resource bfin_pata_resources[] = {
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537u.c b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
index 1b4ac5c64aae..5ba389fc61ae 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537u.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
@@ -570,7 +570,7 @@ static struct platform_device bfin_mac_device = {
 
 static struct pata_platform_info bfin_pata_platform_data = {
 	.ioport_shift = 2,
-	.irq_type = IRQF_TRIGGER_HIGH | IRQF_DISABLED,
+	.irq_type = IRQF_TRIGGER_HIGH,
 };
 
 static struct resource bfin_pata_resources[] = {
diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c
index b52e6728f64f..6c916a67ef68 100644
--- a/arch/blackfin/mach-bf537/boards/stamp.c
+++ b/arch/blackfin/mach-bf537/boards/stamp.c
@@ -962,10 +962,10 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
 	},
 #endif
 
-#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
-	|| defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
+#if defined(CONFIG_SND_BF5XX_SOC_AD1836) \
+	|| defined(CONFIG_SND_BF5XX_SOC_AD1836_MODULE)
 	{
-		.modalias = "ad183x",
+		.modalias = "ad1836",
 		.max_speed_hz = 3125000,     /* max spi clock (SCK) speed in HZ */
 		.bus_num = 0,
 		.chip_select = 4,
@@ -984,9 +984,9 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
 	},
 #endif
 
-#if defined(CONFIG_SND_BF5XX_SOC_ADAV80X) || defined(CONFIG_SND_BF5XX_SOC_ADAV80X_MODULE)
+#if defined(CONFIG_SND_SOC_ADAV80X) || defined(CONFIG_SND_SOC_ADV80X_MODULE)
 	{
-		.modalias = "adav80x",
+		.modalias = "adav801",
 		.max_speed_hz = 3125000,     /* max spi clock (SCK) speed in HZ */
 		.bus_num = 0,
 		.chip_select = 1,
@@ -2101,7 +2101,7 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
 	},
 #endif
 
-#if defined(CONFIG_SND_BF5XX_SOC_ADAV80X) || defined(CONFIG_SND_BF5XX_SOC_ADAV80X_MODULE)
+#if defined(CONFIG_SND_SOC_ADAV80X) || defined(CONFIG_SND_SOC_ADAV80X_MODULE)
 	{
 		I2C_BOARD_INFO("adav803", 0x10),
 	},
@@ -2134,23 +2134,6 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
 	},
 #endif
 
-#if defined(CONFIG_AD7414) || defined(CONFIG_AD7414_MODULE)
-	{
-		I2C_BOARD_INFO("ad7414", 0x9),
-		.irq = IRQ_PG5,
-		.irq_flags = IRQF_TRIGGER_LOW,
-	},
-#endif
-
-#if defined(CONFIG_AD7416) || defined(CONFIG_AD7416_MODULE)
-	{
-		I2C_BOARD_INFO("ad7417", 0xb),
-		.irq = IRQ_PG5,
-		.irq_flags = IRQF_TRIGGER_LOW,
-		.platform_data = (void *)GPIO_PF4,
-	},
-#endif
-
 #if defined(CONFIG_ADE7854_I2C) || defined(CONFIG_ADE7854_I2C_MODULE)
 	{
 		I2C_BOARD_INFO("ade7854", 0x38),
@@ -2161,15 +2144,6 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
 	{
 		I2C_BOARD_INFO("adt75", 0x9),
 		.irq = IRQ_PG5,
-		.irq_flags = IRQF_TRIGGER_LOW,
-	},
-#endif
-
-#if defined(CONFIG_ADT7408) || defined(CONFIG_ADT7408_MODULE)
-	{
-		I2C_BOARD_INFO("adt7408", 0x18),
-		.irq = IRQ_PG5,
-		.irq_flags = IRQF_TRIGGER_LOW,
 	},
 #endif
 
@@ -2178,7 +2152,6 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
 		I2C_BOARD_INFO("adt7410", 0x48),
 		/* CT critical temperature event. line 0 */
 		.irq = IRQ_PG5,
-		.irq_flags = IRQF_TRIGGER_LOW,
 		.platform_data = (void *)&adt7410_platform_data,
 	},
 #endif
@@ -2187,7 +2160,6 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
 	{
 		I2C_BOARD_INFO("ad7291", 0x20),
 		.irq = IRQ_PG5,
-		.irq_flags = IRQF_TRIGGER_LOW,
 	},
 #endif
 
@@ -2275,6 +2247,11 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
 		I2C_BOARD_INFO("adau1361", 0x38),
 	},
 #endif
+#if defined(CONFIG_SND_SOC_ADAU1701) || defined(CONFIG_SND_SOC_ADAU1701_MODULE)
+	{
+		I2C_BOARD_INFO("adau1701", 0x34),
+	},
+#endif
 #if defined(CONFIG_AD525X_DPOT) || defined(CONFIG_AD525X_DPOT_MODULE)
 	{
 		I2C_BOARD_INFO("ad5258", 0x18),
@@ -2388,7 +2365,7 @@ static struct platform_device bfin_sport1_uart_device = {
 #define PATA_INT	IRQ_PF5
 static struct pata_platform_info bfin_pata_platform_data = {
 	.ioport_shift = 1,
-	.irq_flags = IRQF_TRIGGER_HIGH | IRQF_DISABLED,
+	.irq_flags = IRQF_TRIGGER_HIGH,
 };
 
 static struct resource bfin_pata_resources[] = {
@@ -2540,13 +2517,21 @@ static struct platform_device bfin_ac97_pcm = {
 };
 #endif
 
-#if defined(CONFIG_SND_BF5XX_SOC_AD73311) || defined(CONFIG_SND_BF5XX_SOC_AD73311_MODULE)
+#if defined(CONFIG_SND_SOC_AD73311) || defined(CONFIG_SND_SOC_AD73311_MODULE)
 static struct platform_device bfin_ad73311_codec_device = {
 	.name = "ad73311",
 	.id = -1,
 };
 #endif
 
+#if defined(CONFIG_SND_SOC_BFIN_EVAL_ADAV80X) || \
+	defined(CONFIG_SND_SOC_BFIN_EVAL_ADAV80X_MODULE)
+static struct platform_device bfin_eval_adav801_device = {
+	.name = "bfin-eval-adav801",
+	.id = -1,
+};
+#endif
+
 #if defined(CONFIG_SND_BF5XX_SOC_I2S) || defined(CONFIG_SND_BF5XX_SOC_I2S_MODULE)
 static struct platform_device bfin_i2s = {
 	.name = "bfin-i2s",
@@ -2661,6 +2646,20 @@ static struct platform_device iio_gpio_trigger = {
 };
 #endif
 
+#if defined(CONFIG_SND_SOC_BFIN_EVAL_ADAU1373) || \
+	defined(CONFIG_SND_SOC_BFIN_EVAL_ADAU1373_MODULE)
+static struct platform_device bf5xx_adau1373_device = {
+	.name = "bfin-eval-adau1373",
+};
+#endif
+
+#if defined(CONFIG_SND_SOC_BFIN_EVAL_ADAU1701) || \
+	defined(CONFIG_SND_SOC_BFIN_EVAL_ADAU1701_MODULE)
+static struct platform_device bf5xx_adau1701_device = {
+	.name = "bfin-eval-adau1701",
+};
+#endif
+
 static struct platform_device *stamp_devices[] __initdata = {
 
 	&bfin_dpmc,
@@ -2782,7 +2781,7 @@ static struct platform_device *stamp_devices[] __initdata = {
 	&bfin_ac97_pcm,
 #endif
 
-#if defined(CONFIG_SND_BF5XX_SOC_AD73311) || defined(CONFIG_SND_BF5XX_SOC_AD73311_MODULE)
+#if defined(CONFIG_SND_SOC_AD73311) || defined(CONFIG_SND_SOC_AD73311_MODULE)
 	&bfin_ad73311_codec_device,
 #endif
 
@@ -2821,6 +2820,21 @@ static struct platform_device *stamp_devices[] __initdata = {
 	defined(CONFIG_IIO_GPIO_TRIGGER_MODULE)
 	&iio_gpio_trigger,
 #endif
+
+#if defined(CONFIG_SND_SOC_BFIN_EVAL_ADAU1373) || \
+	defined(CONFIG_SND_SOC_BFIN_EVAL_ADAU1373_MODULE)
+	&bf5xx_adau1373_device,
+#endif
+
+#if defined(CONFIG_SND_SOC_BFIN_EVAL_ADAU1701) || \
+	defined(CONFIG_SND_SOC_BFIN_EVAL_ADAU1701_MODULE)
+	&bf5xx_adau1701_device,
+#endif
+
+#if defined(CONFIG_SND_SOC_BFIN_EVAL_ADAV80X) || \
+	defined(CONFIG_SND_SOC_BFIN_EVAL_ADAV80X_MODULE)
+	&bfin_eval_adav801_device,
+#endif
 };
 
 static int __init net2272_init(void)
diff --git a/arch/blackfin/mach-bf537/boards/tcm_bf537.c b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
index 9b7287abdfa1..2da0316d890e 100644
--- a/arch/blackfin/mach-bf537/boards/tcm_bf537.c
+++ b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
@@ -572,7 +572,7 @@ static struct platform_device bfin_mac_device = {
 
 static struct pata_platform_info bfin_pata_platform_data = {
 	.ioport_shift = 2,
-	.irq_type = IRQF_TRIGGER_HIGH | IRQF_DISABLED,
+	.irq_type = IRQF_TRIGGER_HIGH,
 };
 
 static struct resource bfin_pata_resources[] = {
diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c
index e4f397d1d65b..c1b72f2d6354 100644
--- a/arch/blackfin/mach-bf561/boards/cm_bf561.c
+++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c
@@ -348,7 +348,7 @@ static struct platform_device bfin_sir0_device = {
 
 static struct pata_platform_info bfin_pata_platform_data = {
 	.ioport_shift = 2,
-	.irq_type = IRQF_TRIGGER_HIGH | IRQF_DISABLED,
+	.irq_type = IRQF_TRIGGER_HIGH,
 };
 
 static struct resource bfin_pata_resources[] = {
diff --git a/arch/blackfin/mach-bf561/smp.c b/arch/blackfin/mach-bf561/smp.c
index 85abd8be1343..db22401e7605 100644
--- a/arch/blackfin/mach-bf561/smp.c
+++ b/arch/blackfin/mach-bf561/smp.c
@@ -114,7 +114,7 @@ void __init platform_request_ipi(int irq, void *handler)
 	int ret;
 	const char *name = (irq == IRQ_SUPPLE_0) ? supple0 : supple1;
 
-	ret = request_irq(irq, handler, IRQF_DISABLED | IRQF_PERCPU, name, handler);
+	ret = request_irq(irq, handler, IRQF_PERCPU, name, handler);
 	if (ret)
 		panic("Cannot request %s for IPI service", name);
 }
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 107622aacf6b..0784a52389c8 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -295,10 +295,15 @@ EXPORT_SYMBOL_GPL(smp_call_function_single);
 
 void smp_send_reschedule(int cpu)
 {
+	cpumask_t callmap;
 	/* simply trigger an ipi */
 	if (cpu_is_offline(cpu))
 		return;
-	platform_send_ipi_cpu(cpu, IRQ_SUPPLE_0);
+
+	cpumask_clear(&callmap);
+	cpumask_set_cpu(cpu, &callmap);
+
+	smp_send_message(callmap, BFIN_IPI_RESCHEDULE, NULL, NULL, 0);
 
 	return;
 }
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index b92b9445255d..6c4e9aaa70c1 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -10,6 +10,7 @@ config M32R
 	select HAVE_GENERIC_HARDIRQS
 	select GENERIC_IRQ_PROBE
 	select GENERIC_IRQ_SHOW
+	select GENERIC_ATOMIC64
 
 config SBUS
 	bool
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index f093b3a8a4a1..438db84a1f7c 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -47,9 +47,6 @@ config GENERIC_CMOS_UPDATE
 config GENERIC_HWEIGHT
 	def_bool y
 
-config GENERIC_TIME
-	def_bool y
-
 config GENERIC_CLOCKEVENTS
 	def_bool y
 
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index dff933065ab6..8d65bd0383fc 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -109,10 +109,14 @@ static void fixup_clock_comparator(unsigned long long delta)
 	set_clock_comparator(S390_lowcore.clock_comparator);
 }
 
-static int s390_next_event(unsigned long delta,
+static int s390_next_ktime(ktime_t expires,
 			   struct clock_event_device *evt)
 {
-	S390_lowcore.clock_comparator = get_clock() + delta;
+	u64 nsecs;
+
+	nsecs = ktime_to_ns(ktime_sub(expires, ktime_get_monotonic_offset()));
+	do_div(nsecs, 125);
+	S390_lowcore.clock_comparator = TOD_UNIX_EPOCH + (nsecs << 9);
 	set_clock_comparator(S390_lowcore.clock_comparator);
 	return 0;
 }
@@ -137,14 +141,15 @@ void init_cpu_timer(void)
 	cpu = smp_processor_id();
 	cd = &per_cpu(comparators, cpu);
 	cd->name		= "comparator";
-	cd->features		= CLOCK_EVT_FEAT_ONESHOT;
+	cd->features		= CLOCK_EVT_FEAT_ONESHOT |
+				  CLOCK_EVT_FEAT_KTIME;
 	cd->mult		= 16777;
 	cd->shift		= 12;
 	cd->min_delta_ns	= 1;
 	cd->max_delta_ns	= LONG_MAX;
 	cd->rating		= 400;
 	cd->cpumask		= cpumask_of(cpu);
-	cd->set_next_event	= s390_next_event;
+	cd->set_next_ktime	= s390_next_ktime;
 	cd->set_mode		= s390_set_mode;
 
 	clockevents_register_device(cd);
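The ktime-based callback converts an absolute monotonic expiry to TOD-clock units itself. The constant pair in the new code is a fixed-point form of the TOD resolution: 4096 units per microsecond, i.e. 4.096 = 512/125 units per nanosecond. A sketch of the same arithmetic in isolation:

/* Sketch of the conversion above: TOD units are 2^-12 us, so one
 * nanosecond corresponds to 4.096 = 512/125 TOD units.
 */
static u64 ns_to_tod(u64 nsecs)
{
	do_div(nsecs, 125);	/* nsecs / 125 ... */
	return nsecs << 9;	/* ... * 512, a net * 4.096 */
}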
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index b30f71ac0d06..70a0de46cd1b 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -46,9 +46,6 @@ config NEED_PER_CPU_PAGE_FIRST_CHUNK
 config SYS_SUPPORTS_HUGETLBFS
 	def_bool y
 
-config GENERIC_TIME
-	def_bool y
-
 config GENERIC_CLOCKEVENTS
 	def_bool y
 
diff --git a/arch/tile/configs/tilegx_defconfig b/arch/tile/configs/tilegx_defconfig
index 2ad73fb707b9..dafdbbae1124 100644
--- a/arch/tile/configs/tilegx_defconfig
+++ b/arch/tile/configs/tilegx_defconfig
@@ -11,7 +11,6 @@ CONFIG_HAVE_ARCH_ALLOC_REMAP=y
 CONFIG_HAVE_SETUP_PER_CPU_AREA=y
 CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
 CONFIG_SYS_SUPPORTS_HUGETLBFS=y
-CONFIG_GENERIC_TIME=y
 CONFIG_GENERIC_CLOCKEVENTS=y
 CONFIG_RWSEM_GENERIC_SPINLOCK=y
 CONFIG_DEFAULT_MIGRATION_COST=10000000
diff --git a/arch/tile/configs/tilepro_defconfig b/arch/tile/configs/tilepro_defconfig
index f58dc362b944..6f05f969b564 100644
--- a/arch/tile/configs/tilepro_defconfig
+++ b/arch/tile/configs/tilepro_defconfig
@@ -11,7 +11,6 @@ CONFIG_HAVE_ARCH_ALLOC_REMAP=y
 CONFIG_HAVE_SETUP_PER_CPU_AREA=y
 CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
 CONFIG_SYS_SUPPORTS_HUGETLBFS=y
-CONFIG_GENERIC_TIME=y
 CONFIG_GENERIC_CLOCKEVENTS=y
 CONFIG_RWSEM_GENERIC_SPINLOCK=y
 CONFIG_DEFAULT_MIGRATION_COST=10000000
diff --git a/arch/um/defconfig b/arch/um/defconfig
index 9f7634f08cf3..761f5e1a657e 100644
--- a/arch/um/defconfig
+++ b/arch/um/defconfig
@@ -13,7 +13,6 @@ CONFIG_LOCKDEP_SUPPORT=y
 # CONFIG_STACKTRACE_SUPPORT is not set
 CONFIG_GENERIC_CALIBRATE_DELAY=y
 CONFIG_GENERIC_BUG=y
-CONFIG_GENERIC_TIME=y
 CONFIG_GENERIC_CLOCKEVENTS=y
 CONFIG_IRQ_RELEASE_METHOD=y
 CONFIG_HZ=100
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 9037289617ac..f49767716c70 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -64,10 +64,12 @@ config X86
 	select HAVE_TEXT_POKE_SMP
 	select HAVE_GENERIC_HARDIRQS
 	select HAVE_SPARSE_IRQ
+	select SPARSE_IRQ
 	select GENERIC_FIND_FIRST_BIT
 	select GENERIC_IRQ_PROBE
 	select GENERIC_PENDING_IRQ if SMP
 	select GENERIC_IRQ_SHOW
+	select GENERIC_CLOCKEVENTS_MIN_ADJUST
 	select IRQ_FORCED_THREADING
 	select USE_GENERIC_SMP_HELPERS if SMP
 	select HAVE_BPF_JIT if (X86_64 && NET)
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 7b3ca8324b69..9b7273cb2193 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -495,7 +495,7 @@ static inline void default_wait_for_init_deassert(atomic_t *deassert)
 	return;
 }
 
-extern struct apic *generic_bigsmp_probe(void);
+extern void generic_bigsmp_probe(void);
 
 
 #ifdef CONFIG_X86_LOCAL_APIC
diff --git a/arch/x86/include/asm/dwarf2.h b/arch/x86/include/asm/dwarf2.h
index 326099199318..f6f15986df6c 100644
--- a/arch/x86/include/asm/dwarf2.h
+++ b/arch/x86/include/asm/dwarf2.h
@@ -27,6 +27,7 @@
 #define CFI_REMEMBER_STATE	.cfi_remember_state
 #define CFI_RESTORE_STATE	.cfi_restore_state
 #define CFI_UNDEFINED		.cfi_undefined
+#define CFI_ESCAPE		.cfi_escape
 
 #ifdef CONFIG_AS_CFI_SIGNAL_FRAME
 #define CFI_SIGNAL_FRAME	.cfi_signal_frame
@@ -68,6 +69,7 @@
 #define CFI_REMEMBER_STATE	cfi_ignore
 #define CFI_RESTORE_STATE	cfi_ignore
 #define CFI_UNDEFINED		cfi_ignore
+#define CFI_ESCAPE		cfi_ignore
 #define CFI_SIGNAL_FRAME	cfi_ignore
 
 #endif
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 7e50f06393aa..4b4448761e88 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -160,19 +160,11 @@ static inline int invalid_vm86_irq(int irq)
 #define IO_APIC_VECTOR_LIMIT		( 32 * MAX_IO_APICS )
 
 #ifdef CONFIG_X86_IO_APIC
-# ifdef CONFIG_SPARSE_IRQ
-#  define CPU_VECTOR_LIMIT		(64 * NR_CPUS)
-#  define NR_IRQS					\
+# define CPU_VECTOR_LIMIT		(64 * NR_CPUS)
+# define NR_IRQS					\
 	(CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ?	\
 		(NR_VECTORS + CPU_VECTOR_LIMIT)  :	\
 		(NR_VECTORS + IO_APIC_VECTOR_LIMIT))
-# else
-#  define CPU_VECTOR_LIMIT		(32 * NR_CPUS)
-#  define NR_IRQS					\
-	(CPU_VECTOR_LIMIT < IO_APIC_VECTOR_LIMIT ?	\
-		(NR_VECTORS + CPU_VECTOR_LIMIT)  :	\
-		(NR_VECTORS + IO_APIC_VECTOR_LIMIT))
-# endif
 #else /* !CONFIG_X86_IO_APIC: */
 # define NR_IRQS			NR_IRQS_LEGACY
 #endif
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index 4886a68f267e..fd3f9f18cf3f 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -22,27 +22,26 @@ void arch_trigger_all_cpu_backtrace(void);
 #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
 #endif
 
-/*
- * Define some priorities for the nmi notifier call chain.
- *
- * Create a local nmi bit that has a higher priority than
- * external nmis, because the local ones are more frequent.
- *
- * Also setup some default high/normal/low settings for
- * subsystems to registers with.  Using 4 bits to separate
- * the priorities.  This can go a lot higher if needed be.
- */
+#define NMI_FLAG_FIRST	1
+
+enum {
+	NMI_LOCAL=0,
+	NMI_UNKNOWN,
+	NMI_MAX
+};
+
+#define NMI_DONE	0
+#define NMI_HANDLED	1
 
-#define NMI_LOCAL_SHIFT		16	/* randomly picked */
-#define NMI_LOCAL_BIT		(1ULL << NMI_LOCAL_SHIFT)
-#define NMI_HIGH_PRIOR		(1ULL << 8)
-#define NMI_NORMAL_PRIOR	(1ULL << 4)
-#define NMI_LOW_PRIOR		(1ULL << 0)
-#define NMI_LOCAL_HIGH_PRIOR	(NMI_LOCAL_BIT | NMI_HIGH_PRIOR)
-#define NMI_LOCAL_NORMAL_PRIOR	(NMI_LOCAL_BIT | NMI_NORMAL_PRIOR)
-#define NMI_LOCAL_LOW_PRIOR	(NMI_LOCAL_BIT | NMI_LOW_PRIOR)
+typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *);
+
+int register_nmi_handler(unsigned int, nmi_handler_t, unsigned long,
+			 const char *);
+
+void unregister_nmi_handler(unsigned int, const char *);
 
 void stop_nmi(void);
 void restart_nmi(void);
+void local_touch_nmi(void);
 
 #endif /* _ASM_X86_NMI_H */
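This replaces the die_notifier-based NMI dispatch with a purpose-built handler list; the hw_nmi.c hunk further down shows the in-tree conversion. A minimal sketch of the new interface from a subsystem's point of view (my_nmi_handler and the device hooks are hypothetical, not part of this diff):

/* Sketch: return NMI_HANDLED if this NMI was ours, NMI_DONE to let
 * the next handler on the NMI_LOCAL list examine it.
 */
static int my_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
	if (!my_device_nmi_pending())		/* hypothetical */
		return NMI_DONE;

	my_device_ack_nmi();			/* hypothetical */
	return NMI_HANDLED;
}

static int __init my_nmi_init(void)
{
	/* passing NMI_FLAG_FIRST instead of 0 would put the handler
	 * at the head of the list */
	return register_nmi_handler(NMI_LOCAL, my_nmi_handler, 0, "my_nmi");
}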
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 094fb30817ab..f61c62f7d5d8 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -29,6 +29,9 @@
 #define ARCH_PERFMON_EVENTSEL_INV			(1ULL << 23)
 #define ARCH_PERFMON_EVENTSEL_CMASK			0xFF000000ULL
 
+#define AMD_PERFMON_EVENTSEL_GUESTONLY			(1ULL << 40)
+#define AMD_PERFMON_EVENTSEL_HOSTONLY			(1ULL << 41)
+
 #define AMD64_EVENTSEL_EVENT	\
 	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
 #define INTEL_ARCH_EVENT_MASK	\
@@ -43,14 +46,17 @@
 #define AMD64_RAW_EVENT_MASK		\
 	(X86_RAW_EVENT_MASK          |  \
 	 AMD64_EVENTSEL_EVENT)
+#define AMD64_NUM_COUNTERS		4
+#define AMD64_NUM_COUNTERS_F15H		6
+#define AMD64_NUM_COUNTERS_MAX		AMD64_NUM_COUNTERS_F15H
 
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		0x3c
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX		0
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
 		(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))
 
 #define ARCH_PERFMON_BRANCH_MISSES_RETIRED		6
 
 /*
  * Intel "Architectural Performance Monitoring" CPUID
@@ -110,6 +116,35 @@ union cpuid10_edx {
  */
 #define X86_PMC_IDX_FIXED_BTS				(X86_PMC_IDX_FIXED + 16)
 
+/*
+ * IBS cpuid feature detection
+ */
+
+#define IBS_CPUID_FEATURES		0x8000001b
+
+/*
+ * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
+ * bit 0 is used to indicate the existence of IBS.
+ */
+#define IBS_CAPS_AVAIL			(1U<<0)
+#define IBS_CAPS_FETCHSAM		(1U<<1)
+#define IBS_CAPS_OPSAM			(1U<<2)
+#define IBS_CAPS_RDWROPCNT		(1U<<3)
+#define IBS_CAPS_OPCNT			(1U<<4)
+#define IBS_CAPS_BRNTRGT		(1U<<5)
+#define IBS_CAPS_OPCNTEXT		(1U<<6)
+
+#define IBS_CAPS_DEFAULT		(IBS_CAPS_AVAIL		\
+					 | IBS_CAPS_FETCHSAM	\
+					 | IBS_CAPS_OPSAM)
+
+/*
+ * IBS APIC setup
+ */
+#define IBSCTL				0x1cc
+#define IBSCTL_LVT_OFFSET_VALID		(1ULL<<8)
+#define IBSCTL_LVT_OFFSET_MASK		0x0F
+
 /* IbsFetchCtl bits/masks */
 #define IBS_FETCH_RAND_EN	(1ULL<<57)
 #define IBS_FETCH_VAL		(1ULL<<49)
@@ -124,6 +159,8 @@ union cpuid10_edx {
 #define IBS_OP_MAX_CNT		0x0000FFFFULL
 #define IBS_OP_MAX_CNT_EXT	0x007FFFFFULL	/* not a register bit mask */
 
+extern u32 get_ibs_caps(void);
+
 #ifdef CONFIG_PERF_EVENTS
 extern void perf_events_lapic_init(void);
 
@@ -159,7 +196,19 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
 	);							\
 }
 
+struct perf_guest_switch_msr {
+	unsigned msr;
+	u64 host, guest;
+};
+
+extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
 #else
+static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
+{
+	*nr = 0;
+	return NULL;
+}
+
 static inline void perf_events_lapic_init(void)	{ }
 #endif
 
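perf_guest_get_msrs() lets a hypervisor ask perf which PMU MSRs need different host and guest values across a world switch; the !CONFIG_PERF_EVENTS stub keeps callers building. A sketch of the intended consumption on guest entry (the hypervisor-side plumbing is not part of this diff):

/* Sketch: program the "guest" view of each switched MSR before
 * entering the guest; the "host" values are restored symmetrically
 * on exit.
 */
static void switch_pmu_msrs_to_guest(void)
{
	struct perf_guest_switch_msr *msrs;
	int i, nr;

	msrs = perf_guest_get_msrs(&nr);
	for (i = 0; i < nr; i++)
		wrmsrl(msrs[i].msr, msrs[i].guest);
}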
diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
index 3250e3d605d9..92f297069e87 100644
--- a/arch/x86/include/asm/reboot.h
+++ b/arch/x86/include/asm/reboot.h
@@ -23,7 +23,7 @@ void machine_real_restart(unsigned int type);
 #define MRR_BIOS	0
 #define MRR_APM		1
 
-typedef void (*nmi_shootdown_cb)(int, struct die_args*);
+typedef void (*nmi_shootdown_cb)(int, struct pt_regs*);
 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
 
 #endif /* _ASM_X86_REBOOT_H */
diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h
index 201040573444..0a6ba337a2eb 100644
--- a/arch/x86/include/asm/unistd_64.h
+++ b/arch/x86/include/asm/unistd_64.h
@@ -624,7 +624,6 @@ __SYSCALL(__NR_vmsplice, sys_vmsplice)
 __SYSCALL(__NR_move_pages, sys_move_pages)
 #define __NR_utimensat				280
 __SYSCALL(__NR_utimensat, sys_utimensat)
-#define __IGNORE_getcpu		/* implemented as a vsyscall */
 #define __NR_epoll_pwait			281
 __SYSCALL(__NR_epoll_pwait, sys_epoll_pwait)
 #define __NR_signalfd				282
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 82f2912155a5..8baca3c4871c 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -19,7 +19,7 @@ endif
 
 obj-y			:= process_$(BITS).o signal.o entry_$(BITS).o
 obj-y			+= traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
-obj-y			+= time.o ioport.o ldt.o dumpstack.o
+obj-y			+= time.o ioport.o ldt.o dumpstack.o nmi.o
 obj-y			+= setup.o x86_init.o i8259.o irqinit.o jump_label.o
 obj-$(CONFIG_IRQ_WORK)  += irq_work.o
 obj-y			+= probe_roms.o
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
index efd737e827f4..521bead01137 100644
--- a/arch/x86/kernel/apic/bigsmp_32.c
+++ b/arch/x86/kernel/apic/bigsmp_32.c
@@ -255,12 +255,24 @@ static struct apic apic_bigsmp = {
 	.x86_32_early_logical_apicid	= bigsmp_early_logical_apicid,
 };
 
-struct apic * __init generic_bigsmp_probe(void)
+void __init generic_bigsmp_probe(void)
 {
-	if (probe_bigsmp())
-		return &apic_bigsmp;
+	unsigned int cpu;
 
-	return NULL;
+	if (!probe_bigsmp())
+		return;
+
+	apic = &apic_bigsmp;
+
+	for_each_possible_cpu(cpu) {
+		if (early_per_cpu(x86_cpu_to_logical_apicid,
+				  cpu) == BAD_APICID)
+			continue;
+		early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
+			bigsmp_early_logical_apicid(cpu);
+	}
+
+	pr_info("Overriding APIC driver with %s\n", apic_bigsmp.name);
 }
 
 apic_driver(apic_bigsmp);
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index d5e57db0f7be..31cb9ae992b7 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -60,22 +60,10 @@ void arch_trigger_all_cpu_backtrace(void)
 }
 
 static int __kprobes
-arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self,
-			 unsigned long cmd, void *__args)
+arch_trigger_all_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs)
 {
-	struct die_args *args = __args;
-	struct pt_regs *regs;
 	int cpu;
 
-	switch (cmd) {
-	case DIE_NMI:
-		break;
-
-	default:
-		return NOTIFY_DONE;
-	}
-
-	regs = args->regs;
 	cpu = smp_processor_id();
 
 	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
@@ -86,21 +74,16 @@ arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self,
 		show_regs(regs);
 		arch_spin_unlock(&lock);
 		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
-		return NOTIFY_STOP;
+		return NMI_HANDLED;
 	}
 
-	return NOTIFY_DONE;
+	return NMI_DONE;
 }
 
-static __read_mostly struct notifier_block backtrace_notifier = {
-	.notifier_call = arch_trigger_all_cpu_backtrace_handler,
-	.next = NULL,
-	.priority = NMI_LOCAL_LOW_PRIOR,
-};
-
 static int __init register_trigger_all_cpu_backtrace(void)
 {
-	register_die_notifier(&backtrace_notifier);
+	register_nmi_handler(NMI_LOCAL, arch_trigger_all_cpu_backtrace_handler,
+			     0, "arch_bt");
 	return 0;
 }
 early_initcall(register_trigger_all_cpu_backtrace);
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 229e19f3eb57..3c31fa98af6d 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -92,21 +92,21 @@ static struct ioapic {
92 DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1); 92 DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1);
93} ioapics[MAX_IO_APICS]; 93} ioapics[MAX_IO_APICS];
94 94
95#define mpc_ioapic_ver(id) ioapics[id].mp_config.apicver 95#define mpc_ioapic_ver(ioapic_idx) ioapics[ioapic_idx].mp_config.apicver
96 96
97int mpc_ioapic_id(int id) 97int mpc_ioapic_id(int ioapic_idx)
98{ 98{
99 return ioapics[id].mp_config.apicid; 99 return ioapics[ioapic_idx].mp_config.apicid;
100} 100}
101 101
102unsigned int mpc_ioapic_addr(int id) 102unsigned int mpc_ioapic_addr(int ioapic_idx)
103{ 103{
104 return ioapics[id].mp_config.apicaddr; 104 return ioapics[ioapic_idx].mp_config.apicaddr;
105} 105}
106 106
107struct mp_ioapic_gsi *mp_ioapic_gsi_routing(int id) 107struct mp_ioapic_gsi *mp_ioapic_gsi_routing(int ioapic_idx)
108{ 108{
109 return &ioapics[id].gsi_config; 109 return &ioapics[ioapic_idx].gsi_config;
110} 110}
111 111
112int nr_ioapics; 112int nr_ioapics;
@@ -186,11 +186,7 @@ static struct irq_pin_list *alloc_irq_pin_list(int node)
186 186
187 187
188/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */ 188/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
189#ifdef CONFIG_SPARSE_IRQ
190static struct irq_cfg irq_cfgx[NR_IRQS_LEGACY]; 189static struct irq_cfg irq_cfgx[NR_IRQS_LEGACY];
191#else
192static struct irq_cfg irq_cfgx[NR_IRQS];
193#endif
194 190
195int __init arch_early_irq_init(void) 191int __init arch_early_irq_init(void)
196{ 192{
@@ -234,7 +230,6 @@ int __init arch_early_irq_init(void)
234 return 0; 230 return 0;
235} 231}
236 232
237#ifdef CONFIG_SPARSE_IRQ
238static struct irq_cfg *irq_cfg(unsigned int irq) 233static struct irq_cfg *irq_cfg(unsigned int irq)
239{ 234{
240 return irq_get_chip_data(irq); 235 return irq_get_chip_data(irq);
@@ -269,22 +264,6 @@ static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg)
269 kfree(cfg); 264 kfree(cfg);
270} 265}
271 266
272#else
273
274struct irq_cfg *irq_cfg(unsigned int irq)
275{
276 return irq < nr_irqs ? irq_cfgx + irq : NULL;
277}
278
279static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
280{
281 return irq_cfgx + irq;
282}
283
284static inline void free_irq_cfg(unsigned int at, struct irq_cfg *cfg) { }
285
286#endif
287
288static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node) 267static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
289{ 268{
290 int res = irq_alloc_desc_at(at, node); 269 int res = irq_alloc_desc_at(at, node);
@@ -802,13 +781,13 @@ int restore_ioapic_entries(void)
802/* 781/*
803 * Find the IRQ entry number of a certain pin. 782 * Find the IRQ entry number of a certain pin.
804 */ 783 */
805static int find_irq_entry(int apic, int pin, int type) 784static int find_irq_entry(int ioapic_idx, int pin, int type)
806{ 785{
807 int i; 786 int i;
808 787
809 for (i = 0; i < mp_irq_entries; i++) 788 for (i = 0; i < mp_irq_entries; i++)
810 if (mp_irqs[i].irqtype == type && 789 if (mp_irqs[i].irqtype == type &&
811 (mp_irqs[i].dstapic == mpc_ioapic_id(apic) || 790 (mp_irqs[i].dstapic == mpc_ioapic_id(ioapic_idx) ||
812 mp_irqs[i].dstapic == MP_APIC_ALL) && 791 mp_irqs[i].dstapic == MP_APIC_ALL) &&
813 mp_irqs[i].dstirq == pin) 792 mp_irqs[i].dstirq == pin)
814 return i; 793 return i;
@@ -847,12 +826,13 @@ static int __init find_isa_irq_apic(int irq, int type)
847 (mp_irqs[i].srcbusirq == irq)) 826 (mp_irqs[i].srcbusirq == irq))
848 break; 827 break;
849 } 828 }
829
850 if (i < mp_irq_entries) { 830 if (i < mp_irq_entries) {
851 int apic; 831 int ioapic_idx;
852 for(apic = 0; apic < nr_ioapics; apic++) { 832
853 if (mpc_ioapic_id(apic) == mp_irqs[i].dstapic) 833 for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++)
854 return apic; 834 if (mpc_ioapic_id(ioapic_idx) == mp_irqs[i].dstapic)
855 } 835 return ioapic_idx;
856 } 836 }
857 837
858 return -1; 838 return -1;
@@ -1067,7 +1047,7 @@ static int pin_2_irq(int idx, int apic, int pin)
1067int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin, 1047int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
1068 struct io_apic_irq_attr *irq_attr) 1048 struct io_apic_irq_attr *irq_attr)
1069{ 1049{
1070 int apic, i, best_guess = -1; 1050 int ioapic_idx, i, best_guess = -1;
1071 1051
1072 apic_printk(APIC_DEBUG, 1052 apic_printk(APIC_DEBUG,
1073 "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n", 1053 "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
@@ -1080,8 +1060,8 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
1080 for (i = 0; i < mp_irq_entries; i++) { 1060 for (i = 0; i < mp_irq_entries; i++) {
1081 int lbus = mp_irqs[i].srcbus; 1061 int lbus = mp_irqs[i].srcbus;
1082 1062
1083 for (apic = 0; apic < nr_ioapics; apic++) 1063 for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++)
1084 if (mpc_ioapic_id(apic) == mp_irqs[i].dstapic || 1064 if (mpc_ioapic_id(ioapic_idx) == mp_irqs[i].dstapic ||
1085 mp_irqs[i].dstapic == MP_APIC_ALL) 1065 mp_irqs[i].dstapic == MP_APIC_ALL)
1086 break; 1066 break;
1087 1067
@@ -1089,13 +1069,13 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
1089 !mp_irqs[i].irqtype && 1069 !mp_irqs[i].irqtype &&
1090 (bus == lbus) && 1070 (bus == lbus) &&
1091 (slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) { 1071 (slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) {
1092 int irq = pin_2_irq(i, apic, mp_irqs[i].dstirq); 1072 int irq = pin_2_irq(i, ioapic_idx, mp_irqs[i].dstirq);
1093 1073
1094 if (!(apic || IO_APIC_IRQ(irq))) 1074 if (!(ioapic_idx || IO_APIC_IRQ(irq)))
1095 continue; 1075 continue;
1096 1076
1097 if (pin == (mp_irqs[i].srcbusirq & 3)) { 1077 if (pin == (mp_irqs[i].srcbusirq & 3)) {
1098 set_io_apic_irq_attr(irq_attr, apic, 1078 set_io_apic_irq_attr(irq_attr, ioapic_idx,
1099 mp_irqs[i].dstirq, 1079 mp_irqs[i].dstirq,
1100 irq_trigger(i), 1080 irq_trigger(i),
1101 irq_polarity(i)); 1081 irq_polarity(i));
@@ -1106,7 +1086,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
1106 * best-guess fuzzy result for broken mptables. 1086 * best-guess fuzzy result for broken mptables.
1107 */ 1087 */
1108 if (best_guess < 0) { 1088 if (best_guess < 0) {
1109 set_io_apic_irq_attr(irq_attr, apic, 1089 set_io_apic_irq_attr(irq_attr, ioapic_idx,
1110 mp_irqs[i].dstirq, 1090 mp_irqs[i].dstirq,
1111 irq_trigger(i), 1091 irq_trigger(i),
1112 irq_polarity(i)); 1092 irq_polarity(i));
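For reference, the slot/pin arithmetic visible in this hunk decodes the MP-table srcbusirq field for PCI buses: the device slot sits in bits 6:2 and the INTx pin in bits 1:0. A runnable sketch of that decoding, with example values invented:

/*
 * Decode a PCI srcbusirq as the hunk above does:
 *   slot = (srcbusirq >> 2) & 0x1f, pin = srcbusirq & 3.
 */
#include <stdio.h>

int main(void)
{
        unsigned int srcbusirq = (0x1d << 2) | 0x2;     /* slot 0x1d, pin INTC */

        unsigned int slot = (srcbusirq >> 2) & 0x1f;
        unsigned int pin  = srcbusirq & 3;

        printf("slot %#x, pin INT%c\n", slot, 'A' + pin);
        return 0;
}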
@@ -1344,77 +1324,100 @@ static void ioapic_register_intr(unsigned int irq, struct irq_cfg *cfg,
1344 fasteoi ? "fasteoi" : "edge"); 1324 fasteoi ? "fasteoi" : "edge");
1345} 1325}
1346 1326
1347static int setup_ioapic_entry(int apic_id, int irq, 1327
1348 struct IO_APIC_route_entry *entry, 1328static int setup_ir_ioapic_entry(int irq,
1349 unsigned int destination, int trigger, 1329 struct IR_IO_APIC_route_entry *entry,
1350 int polarity, int vector, int pin) 1330 unsigned int destination, int vector,
1331 struct io_apic_irq_attr *attr)
1351{ 1332{
1352 /* 1333 int index;
1353 * add it to the IO-APIC irq-routing table: 1334 struct irte irte;
1354 */ 1335 int ioapic_id = mpc_ioapic_id(attr->ioapic);
1355 memset(entry,0,sizeof(*entry)); 1336 struct intel_iommu *iommu = map_ioapic_to_ir(ioapic_id);
1356 1337
1357 if (intr_remapping_enabled) { 1338 if (!iommu) {
1358 struct intel_iommu *iommu = map_ioapic_to_ir(apic_id); 1339 pr_warn("No mapping iommu for ioapic %d\n", ioapic_id);
1359 struct irte irte; 1340 return -ENODEV;
1360 struct IR_IO_APIC_route_entry *ir_entry = 1341 }
1361 (struct IR_IO_APIC_route_entry *) entry;
1362 int index;
1363 1342
1364 if (!iommu) 1343 index = alloc_irte(iommu, irq, 1);
1365 panic("No mapping iommu for ioapic %d\n", apic_id); 1344 if (index < 0) {
1345 pr_warn("Failed to allocate IRTE for ioapic %d\n", ioapic_id);
1346 return -ENOMEM;
1347 }
1366 1348
1367 index = alloc_irte(iommu, irq, 1); 1349 prepare_irte(&irte, vector, destination);
1368 if (index < 0)
1369 panic("Failed to allocate IRTE for ioapic %d\n", apic_id);
1370 1350
1371 prepare_irte(&irte, vector, destination); 1351 /* Set source-id of interrupt request */
1352 set_ioapic_sid(&irte, ioapic_id);
1372 1353
1373 /* Set source-id of interrupt request */ 1354 modify_irte(irq, &irte);
1374 set_ioapic_sid(&irte, apic_id);
1375 1355
1376 modify_irte(irq, &irte); 1356 apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: "
1357 "Set IRTE entry (P:%d FPD:%d Dst_Mode:%d "
1358 "Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X "
1359 "Avail:%X Vector:%02X Dest:%08X "
1360 "SID:%04X SQ:%X SVT:%X)\n",
1361 attr->ioapic, irte.present, irte.fpd, irte.dst_mode,
1362 irte.redir_hint, irte.trigger_mode, irte.dlvry_mode,
1363 irte.avail, irte.vector, irte.dest_id,
1364 irte.sid, irte.sq, irte.svt);
1365
1366 memset(entry, 0, sizeof(*entry));
1367
1368 entry->index2 = (index >> 15) & 0x1;
1369 entry->zero = 0;
1370 entry->format = 1;
1371 entry->index = (index & 0x7fff);
1372 /*
1373 * IO-APIC RTE will be configured with virtual vector.
1374 * irq handler will do the explicit EOI to the io-apic.
1375 */
1376 entry->vector = attr->ioapic_pin;
1377 entry->mask = 0; /* enable IRQ */
1378 entry->trigger = attr->trigger;
1379 entry->polarity = attr->polarity;
1377 1380
1378 ir_entry->index2 = (index >> 15) & 0x1; 1381 /* Mask level triggered irqs.
1379 ir_entry->zero = 0; 1382 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
1380 ir_entry->format = 1; 1383 */
1381 ir_entry->index = (index & 0x7fff); 1384 if (attr->trigger)
1382 /* 1385 entry->mask = 1;
1383 * IO-APIC RTE will be configured with virtual vector. 1386
1384 * irq handler will do the explicit EOI to the io-apic. 1387 return 0;
1385 */ 1388}
1386 ir_entry->vector = pin;
1387
1388 apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: "
1389 "Set IRTE entry (P:%d FPD:%d Dst_Mode:%d "
1390 "Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X "
1391 "Avail:%X Vector:%02X Dest:%08X "
1392 "SID:%04X SQ:%X SVT:%X)\n",
1393 apic_id, irte.present, irte.fpd, irte.dst_mode,
1394 irte.redir_hint, irte.trigger_mode, irte.dlvry_mode,
1395 irte.avail, irte.vector, irte.dest_id,
1396 irte.sid, irte.sq, irte.svt);
1397 } else {
1398 entry->delivery_mode = apic->irq_delivery_mode;
1399 entry->dest_mode = apic->irq_dest_mode;
1400 entry->dest = destination;
1401 entry->vector = vector;
1402 }
1403 1389
1404 entry->mask = 0; /* enable IRQ */ 1390static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
1405 entry->trigger = trigger; 1391 unsigned int destination, int vector,
1406 entry->polarity = polarity; 1392 struct io_apic_irq_attr *attr)
1393{
1394 if (intr_remapping_enabled)
1395 return setup_ir_ioapic_entry(irq,
1396 (struct IR_IO_APIC_route_entry *)entry,
1397 destination, vector, attr);
1407 1398
1408 /* Mask level triggered irqs. 1399 memset(entry, 0, sizeof(*entry));
1400
1401 entry->delivery_mode = apic->irq_delivery_mode;
1402 entry->dest_mode = apic->irq_dest_mode;
1403 entry->dest = destination;
1404 entry->vector = vector;
1405 entry->mask = 0; /* enable IRQ */
1406 entry->trigger = attr->trigger;
1407 entry->polarity = attr->polarity;
1408
1409 /*
1410 * Mask level triggered irqs.
1409 * Use IRQ_DELAYED_DISABLE for edge triggered irqs. 1411 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
1410 */ 1412 */
1411 if (trigger) 1413 if (attr->trigger)
1412 entry->mask = 1; 1414 entry->mask = 1;
1415
1413 return 0; 1416 return 0;
1414} 1417}
1415 1418
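The rewrite above turns one deeply nested function into a dispatcher plus an interrupt-remapping helper, and replaces the old panic() calls with error returns the caller can recover from. A stripped-down sketch of the resulting control flow; all names here are hypothetical:

/*
 * Sketch of the dispatch-and-propagate shape, not the kernel code itself.
 */
#include <stdio.h>
#include <errno.h>

static int remapping_enabled = 1;               /* intr_remapping_enabled stand-in */

static int setup_remapped_entry(void)
{
        return -ENODEV;                         /* e.g. no IOMMU for this ioapic */
}

static int setup_entry(void)
{
        if (remapping_enabled)
                return setup_remapped_entry();  /* early dispatch, no nesting */

        /* ... fill the plain IO-APIC route entry ... */
        return 0;
}

int main(void)
{
        if (setup_entry())                      /* caller recovers, no panic() */
                fprintf(stderr, "setup failed; vector would be freed\n");
        return 0;
}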
1416static void setup_ioapic_irq(int apic_id, int pin, unsigned int irq, 1419static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg,
1417 struct irq_cfg *cfg, int trigger, int polarity) 1420 struct io_apic_irq_attr *attr)
1418{ 1421{
1419 struct IO_APIC_route_entry entry; 1422 struct IO_APIC_route_entry entry;
1420 unsigned int dest; 1423 unsigned int dest;
@@ -1437,49 +1440,48 @@ static void setup_ioapic_irq(int apic_id, int pin, unsigned int irq,
1437 apic_printk(APIC_VERBOSE,KERN_DEBUG 1440 apic_printk(APIC_VERBOSE,KERN_DEBUG
1438 "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> " 1441 "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
1439 "IRQ %d Mode:%i Active:%i Dest:%d)\n", 1442 "IRQ %d Mode:%i Active:%i Dest:%d)\n",
1440 apic_id, mpc_ioapic_id(apic_id), pin, cfg->vector, 1443 attr->ioapic, mpc_ioapic_id(attr->ioapic), attr->ioapic_pin,
1441 irq, trigger, polarity, dest); 1444 cfg->vector, irq, attr->trigger, attr->polarity, dest);
1442
1443 1445
1444 if (setup_ioapic_entry(mpc_ioapic_id(apic_id), irq, &entry, 1446 if (setup_ioapic_entry(irq, &entry, dest, cfg->vector, attr)) {
1445 dest, trigger, polarity, cfg->vector, pin)) { 1447 pr_warn("Failed to setup ioapic entry for ioapic %d, pin %d\n",
1446 printk("Failed to setup ioapic entry for ioapic %d, pin %d\n", 1448 mpc_ioapic_id(attr->ioapic), attr->ioapic_pin);
1447 mpc_ioapic_id(apic_id), pin);
1448 __clear_irq_vector(irq, cfg); 1449 __clear_irq_vector(irq, cfg);
1450
1449 return; 1451 return;
1450 } 1452 }
1451 1453
1452 ioapic_register_intr(irq, cfg, trigger); 1454 ioapic_register_intr(irq, cfg, attr->trigger);
1453 if (irq < legacy_pic->nr_legacy_irqs) 1455 if (irq < legacy_pic->nr_legacy_irqs)
1454 legacy_pic->mask(irq); 1456 legacy_pic->mask(irq);
1455 1457
1456 ioapic_write_entry(apic_id, pin, entry); 1458 ioapic_write_entry(attr->ioapic, attr->ioapic_pin, entry);
1457} 1459}
1458 1460
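setup_ioapic_irq() now receives the io_apic_irq_attr parameter object instead of four loose scalars, which is what lets the pin/trigger/polarity plumbing above shrink. A minimal illustration of the same pattern with reduced, made-up types:

/*
 * Parameter-object sketch: bundle related routing attributes together.
 */
#include <stdio.h>

struct irq_attr {                               /* reduced io_apic_irq_attr */
        int ioapic;
        int ioapic_pin;
        int trigger;                            /* 1 = level, 0 = edge */
        int polarity;                           /* 1 = active low, 0 = high */
};

static void setup_irq(unsigned int irq, const struct irq_attr *attr)
{
        printf("IRQ %u -> ioapic %d pin %d (%s, active %s)\n",
               irq, attr->ioapic, attr->ioapic_pin,
               attr->trigger ? "level" : "edge",
               attr->polarity ? "low" : "high");
}

int main(void)
{
        struct irq_attr attr = {
                .ioapic = 0, .ioapic_pin = 9, .trigger = 1, .polarity = 1,
        };

        setup_irq(9, &attr);
        return 0;
}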
1459static bool __init io_apic_pin_not_connected(int idx, int apic_id, int pin) 1461static bool __init io_apic_pin_not_connected(int idx, int ioapic_idx, int pin)
1460{ 1462{
1461 if (idx != -1) 1463 if (idx != -1)
1462 return false; 1464 return false;
1463 1465
1464 apic_printk(APIC_VERBOSE, KERN_DEBUG " apic %d pin %d not connected\n", 1466 apic_printk(APIC_VERBOSE, KERN_DEBUG " apic %d pin %d not connected\n",
1465 mpc_ioapic_id(apic_id), pin); 1467 mpc_ioapic_id(ioapic_idx), pin);
1466 return true; 1468 return true;
1467} 1469}
1468 1470
1469static void __init __io_apic_setup_irqs(unsigned int apic_id) 1471static void __init __io_apic_setup_irqs(unsigned int ioapic_idx)
1470{ 1472{
1471 int idx, node = cpu_to_node(0); 1473 int idx, node = cpu_to_node(0);
1472 struct io_apic_irq_attr attr; 1474 struct io_apic_irq_attr attr;
1473 unsigned int pin, irq; 1475 unsigned int pin, irq;
1474 1476
1475 for (pin = 0; pin < ioapics[apic_id].nr_registers; pin++) { 1477 for (pin = 0; pin < ioapics[ioapic_idx].nr_registers; pin++) {
1476 idx = find_irq_entry(apic_id, pin, mp_INT); 1478 idx = find_irq_entry(ioapic_idx, pin, mp_INT);
1477 if (io_apic_pin_not_connected(idx, apic_id, pin)) 1479 if (io_apic_pin_not_connected(idx, ioapic_idx, pin))
1478 continue; 1480 continue;
1479 1481
1480 irq = pin_2_irq(idx, apic_id, pin); 1482 irq = pin_2_irq(idx, ioapic_idx, pin);
1481 1483
1482 if ((apic_id > 0) && (irq > 16)) 1484 if ((ioapic_idx > 0) && (irq > 16))
1483 continue; 1485 continue;
1484 1486
1485 /* 1487 /*
@@ -1487,10 +1489,10 @@ static void __init __io_apic_setup_irqs(unsigned int apic_id)
1487 * installed and if it returns 1: 1489 * installed and if it returns 1:
1488 */ 1490 */
1489 if (apic->multi_timer_check && 1491 if (apic->multi_timer_check &&
1490 apic->multi_timer_check(apic_id, irq)) 1492 apic->multi_timer_check(ioapic_idx, irq))
1491 continue; 1493 continue;
1492 1494
1493 set_io_apic_irq_attr(&attr, apic_id, pin, irq_trigger(idx), 1495 set_io_apic_irq_attr(&attr, ioapic_idx, pin, irq_trigger(idx),
1494 irq_polarity(idx)); 1496 irq_polarity(idx));
1495 1497
1496 io_apic_setup_irq_pin(irq, node, &attr); 1498 io_apic_setup_irq_pin(irq, node, &attr);
@@ -1499,12 +1501,12 @@ static void __init __io_apic_setup_irqs(unsigned int apic_id)
1499 1501
1500static void __init setup_IO_APIC_irqs(void) 1502static void __init setup_IO_APIC_irqs(void)
1501{ 1503{
1502 unsigned int apic_id; 1504 unsigned int ioapic_idx;
1503 1505
1504 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); 1506 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
1505 1507
1506 for (apic_id = 0; apic_id < nr_ioapics; apic_id++) 1508 for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++)
1507 __io_apic_setup_irqs(apic_id); 1509 __io_apic_setup_irqs(ioapic_idx);
1508} 1510}
1509 1511
1510/* 1512/*
@@ -1514,28 +1516,28 @@ static void __init setup_IO_APIC_irqs(void)
1514 */ 1516 */
1515void setup_IO_APIC_irq_extra(u32 gsi) 1517void setup_IO_APIC_irq_extra(u32 gsi)
1516{ 1518{
1517 int apic_id = 0, pin, idx, irq, node = cpu_to_node(0); 1519 int ioapic_idx = 0, pin, idx, irq, node = cpu_to_node(0);
1518 struct io_apic_irq_attr attr; 1520 struct io_apic_irq_attr attr;
1519 1521
1520 /* 1522 /*
1521 * Convert 'gsi' to 'ioapic.pin'. 1523 * Convert 'gsi' to 'ioapic.pin'.
1522 */ 1524 */
1523 apic_id = mp_find_ioapic(gsi); 1525 ioapic_idx = mp_find_ioapic(gsi);
1524 if (apic_id < 0) 1526 if (ioapic_idx < 0)
1525 return; 1527 return;
1526 1528
1527 pin = mp_find_ioapic_pin(apic_id, gsi); 1529 pin = mp_find_ioapic_pin(ioapic_idx, gsi);
1528 idx = find_irq_entry(apic_id, pin, mp_INT); 1530 idx = find_irq_entry(ioapic_idx, pin, mp_INT);
1529 if (idx == -1) 1531 if (idx == -1)
1530 return; 1532 return;
1531 1533
1532 irq = pin_2_irq(idx, apic_id, pin); 1534 irq = pin_2_irq(idx, ioapic_idx, pin);
1533 1535
1534 /* Only handle the non legacy irqs on secondary ioapics */ 1536 /* Only handle the non legacy irqs on secondary ioapics */
1535 if (apic_id == 0 || irq < NR_IRQS_LEGACY) 1537 if (ioapic_idx == 0 || irq < NR_IRQS_LEGACY)
1536 return; 1538 return;
1537 1539
1538 set_io_apic_irq_attr(&attr, apic_id, pin, irq_trigger(idx), 1540 set_io_apic_irq_attr(&attr, ioapic_idx, pin, irq_trigger(idx),
1539 irq_polarity(idx)); 1541 irq_polarity(idx));
1540 1542
1541 io_apic_setup_irq_pin_once(irq, node, &attr); 1543 io_apic_setup_irq_pin_once(irq, node, &attr);
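The gsi-to-(ioapic, pin) conversion above relies on each IO-APIC serving a contiguous GSI window, which is what mp_find_ioapic() and mp_find_ioapic_pin() walk. A sketch of that lookup with invented window bounds:

/*
 * Sketch: map a GSI to (ioapic index, pin) via per-controller GSI windows.
 */
#include <stdio.h>

struct ioapic_gsi { unsigned int base, end; };

static struct ioapic_gsi windows[] = { { 0, 23 }, { 24, 47 } };
static const int nr = 2;

static int find_ioapic(unsigned int gsi, unsigned int *pin)
{
        int i;

        for (i = 0; i < nr; i++)
                if (gsi >= windows[i].base && gsi <= windows[i].end) {
                        *pin = gsi - windows[i].base;
                        return i;
                }
        return -1;
}

int main(void)
{
        unsigned int pin;
        int idx = find_ioapic(30, &pin);

        printf("GSI 30 -> ioapic %d pin %u\n", idx, pin);
        return 0;
}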
@@ -1544,8 +1546,8 @@ void setup_IO_APIC_irq_extra(u32 gsi)
1544/* 1546/*
1545 * Set up the timer pin, possibly with the 8259A-master behind. 1547 * Set up the timer pin, possibly with the 8259A-master behind.
1546 */ 1548 */
1547static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin, 1549static void __init setup_timer_IRQ0_pin(unsigned int ioapic_idx,
1548 int vector) 1550 unsigned int pin, int vector)
1549{ 1551{
1550 struct IO_APIC_route_entry entry; 1552 struct IO_APIC_route_entry entry;
1551 1553
@@ -1576,45 +1578,29 @@ static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin,
1576 /* 1578 /*
1577 * Add it to the IO-APIC irq-routing table: 1579 * Add it to the IO-APIC irq-routing table:
1578 */ 1580 */
1579 ioapic_write_entry(apic_id, pin, entry); 1581 ioapic_write_entry(ioapic_idx, pin, entry);
1580} 1582}
1581 1583
1582 1584__apicdebuginit(void) print_IO_APIC(int ioapic_idx)
1583__apicdebuginit(void) print_IO_APIC(void)
1584{ 1585{
1585 int apic, i; 1586 int i;
1586 union IO_APIC_reg_00 reg_00; 1587 union IO_APIC_reg_00 reg_00;
1587 union IO_APIC_reg_01 reg_01; 1588 union IO_APIC_reg_01 reg_01;
1588 union IO_APIC_reg_02 reg_02; 1589 union IO_APIC_reg_02 reg_02;
1589 union IO_APIC_reg_03 reg_03; 1590 union IO_APIC_reg_03 reg_03;
1590 unsigned long flags; 1591 unsigned long flags;
1591 struct irq_cfg *cfg;
1592 unsigned int irq;
1593
1594 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
1595 for (i = 0; i < nr_ioapics; i++)
1596 printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
1597 mpc_ioapic_id(i), ioapics[i].nr_registers);
1598
1599 /*
1600 * We are a bit conservative about what we expect. We have to
1601 * know about every hardware change ASAP.
1602 */
1603 printk(KERN_INFO "testing the IO APIC.......................\n");
1604
1605 for (apic = 0; apic < nr_ioapics; apic++) {
1606 1592
1607 raw_spin_lock_irqsave(&ioapic_lock, flags); 1593 raw_spin_lock_irqsave(&ioapic_lock, flags);
1608 reg_00.raw = io_apic_read(apic, 0); 1594 reg_00.raw = io_apic_read(ioapic_idx, 0);
1609 reg_01.raw = io_apic_read(apic, 1); 1595 reg_01.raw = io_apic_read(ioapic_idx, 1);
1610 if (reg_01.bits.version >= 0x10) 1596 if (reg_01.bits.version >= 0x10)
1611 reg_02.raw = io_apic_read(apic, 2); 1597 reg_02.raw = io_apic_read(ioapic_idx, 2);
1612 if (reg_01.bits.version >= 0x20) 1598 if (reg_01.bits.version >= 0x20)
1613 reg_03.raw = io_apic_read(apic, 3); 1599 reg_03.raw = io_apic_read(ioapic_idx, 3);
1614 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 1600 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
1615 1601
1616 printk("\n"); 1602 printk("\n");
1617 printk(KERN_DEBUG "IO APIC #%d......\n", mpc_ioapic_id(apic)); 1603 printk(KERN_DEBUG "IO APIC #%d......\n", mpc_ioapic_id(ioapic_idx));
1618 printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw); 1604 printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
1619 printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID); 1605 printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
1620 printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type); 1606 printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
@@ -1664,7 +1650,7 @@ __apicdebuginit(void) print_IO_APIC(void)
1664 struct IO_APIC_route_entry entry; 1650 struct IO_APIC_route_entry entry;
1665 struct IR_IO_APIC_route_entry *ir_entry; 1651 struct IR_IO_APIC_route_entry *ir_entry;
1666 1652
1667 entry = ioapic_read_entry(apic, i); 1653 entry = ioapic_read_entry(ioapic_idx, i);
1668 ir_entry = (struct IR_IO_APIC_route_entry *) &entry; 1654 ir_entry = (struct IR_IO_APIC_route_entry *) &entry;
1669 printk(KERN_DEBUG " %02x %04X ", 1655 printk(KERN_DEBUG " %02x %04X ",
1670 i, 1656 i,
@@ -1685,7 +1671,7 @@ __apicdebuginit(void) print_IO_APIC(void)
1685 } else { 1671 } else {
1686 struct IO_APIC_route_entry entry; 1672 struct IO_APIC_route_entry entry;
1687 1673
1688 entry = ioapic_read_entry(apic, i); 1674 entry = ioapic_read_entry(ioapic_idx, i);
1689 printk(KERN_DEBUG " %02x %02X ", 1675 printk(KERN_DEBUG " %02x %02X ",
1690 i, 1676 i,
1691 entry.dest 1677 entry.dest
@@ -1703,7 +1689,28 @@ __apicdebuginit(void) print_IO_APIC(void)
1703 ); 1689 );
1704 } 1690 }
1705 } 1691 }
1706 } 1692}
1693
1694__apicdebuginit(void) print_IO_APICs(void)
1695{
1696 int ioapic_idx;
1697 struct irq_cfg *cfg;
1698 unsigned int irq;
1699
1700 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
1701 for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++)
1702 printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
1703 mpc_ioapic_id(ioapic_idx),
1704 ioapics[ioapic_idx].nr_registers);
1705
1706 /*
1707 * We are a bit conservative about what we expect. We have to
1708 * know about every hardware change ASAP.
1709 */
1710 printk(KERN_INFO "testing the IO APIC.......................\n");
1711
1712 for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++)
1713 print_IO_APIC(ioapic_idx);
1707 1714
1708 printk(KERN_DEBUG "IRQ to pin mappings:\n"); 1715 printk(KERN_DEBUG "IRQ to pin mappings:\n");
1709 for_each_active_irq(irq) { 1716 for_each_active_irq(irq) {
@@ -1722,8 +1729,6 @@ __apicdebuginit(void) print_IO_APIC(void)
1722 } 1729 }
1723 1730
1724 printk(KERN_INFO ".................................... done.\n"); 1731 printk(KERN_INFO ".................................... done.\n");
1725
1726 return;
1727} 1732}
1728 1733
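The refactor above splits the monolithic dumper into print_IO_APIC(), which handles one controller, and print_IO_APICs(), which iterates over all of them, so a single IO-APIC can now be dumped on its own. The shape, reduced to a trivial sketch:

/*
 * Per-instance printer plus iterator, mirroring the split above.
 */
#include <stdio.h>

static const int nr_units = 2;

static void print_unit(int idx)                 /* print_IO_APIC() analogue */
{
        printf("unit #%d: registers dumped here\n", idx);
}

static void print_units(void)                   /* print_IO_APICs() analogue */
{
        int idx;

        for (idx = 0; idx < nr_units; idx++)
                print_unit(idx);
}

int main(void)
{
        print_units();                          /* full dump */
        print_unit(1);                          /* now also possible alone */
        return 0;
}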
1729__apicdebuginit(void) print_APIC_field(int base) 1734__apicdebuginit(void) print_APIC_field(int base)
@@ -1917,7 +1922,7 @@ __apicdebuginit(int) print_ICs(void)
1917 return 0; 1922 return 0;
1918 1923
1919 print_local_APICs(show_lapic); 1924 print_local_APICs(show_lapic);
1920 print_IO_APIC(); 1925 print_IO_APICs();
1921 1926
1922 return 0; 1927 return 0;
1923} 1928}
@@ -2042,7 +2047,7 @@ void __init setup_ioapic_ids_from_mpc_nocheck(void)
2042{ 2047{
2043 union IO_APIC_reg_00 reg_00; 2048 union IO_APIC_reg_00 reg_00;
2044 physid_mask_t phys_id_present_map; 2049 physid_mask_t phys_id_present_map;
2045 int apic_id; 2050 int ioapic_idx;
2046 int i; 2051 int i;
2047 unsigned char old_id; 2052 unsigned char old_id;
2048 unsigned long flags; 2053 unsigned long flags;
@@ -2056,21 +2061,20 @@ void __init setup_ioapic_ids_from_mpc_nocheck(void)
2056 /* 2061 /*
2057 * Set the IOAPIC ID to the value stored in the MPC table. 2062 * Set the IOAPIC ID to the value stored in the MPC table.
2058 */ 2063 */
2059 for (apic_id = 0; apic_id < nr_ioapics; apic_id++) { 2064 for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) {
2060
2061 /* Read the register 0 value */ 2065 /* Read the register 0 value */
2062 raw_spin_lock_irqsave(&ioapic_lock, flags); 2066 raw_spin_lock_irqsave(&ioapic_lock, flags);
2063 reg_00.raw = io_apic_read(apic_id, 0); 2067 reg_00.raw = io_apic_read(ioapic_idx, 0);
2064 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2068 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2065 2069
2066 old_id = mpc_ioapic_id(apic_id); 2070 old_id = mpc_ioapic_id(ioapic_idx);
2067 2071
2068 if (mpc_ioapic_id(apic_id) >= get_physical_broadcast()) { 2072 if (mpc_ioapic_id(ioapic_idx) >= get_physical_broadcast()) {
2069 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n", 2073 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
2070 apic_id, mpc_ioapic_id(apic_id)); 2074 ioapic_idx, mpc_ioapic_id(ioapic_idx));
2071 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", 2075 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
2072 reg_00.bits.ID); 2076 reg_00.bits.ID);
2073 ioapics[apic_id].mp_config.apicid = reg_00.bits.ID; 2077 ioapics[ioapic_idx].mp_config.apicid = reg_00.bits.ID;
2074 } 2078 }
2075 2079
2076 /* 2080 /*
@@ -2079,9 +2083,9 @@ void __init setup_ioapic_ids_from_mpc_nocheck(void)
2079 * 'stuck on smp_invalidate_needed IPI wait' messages. 2083 * 'stuck on smp_invalidate_needed IPI wait' messages.
2080 */ 2084 */
2081 if (apic->check_apicid_used(&phys_id_present_map, 2085 if (apic->check_apicid_used(&phys_id_present_map,
2082 mpc_ioapic_id(apic_id))) { 2086 mpc_ioapic_id(ioapic_idx))) {
2083 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n", 2087 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
2084 apic_id, mpc_ioapic_id(apic_id)); 2088 ioapic_idx, mpc_ioapic_id(ioapic_idx));
2085 for (i = 0; i < get_physical_broadcast(); i++) 2089 for (i = 0; i < get_physical_broadcast(); i++)
2086 if (!physid_isset(i, phys_id_present_map)) 2090 if (!physid_isset(i, phys_id_present_map))
2087 break; 2091 break;
@@ -2090,14 +2094,14 @@ void __init setup_ioapic_ids_from_mpc_nocheck(void)
2090 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", 2094 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
2091 i); 2095 i);
2092 physid_set(i, phys_id_present_map); 2096 physid_set(i, phys_id_present_map);
2093 ioapics[apic_id].mp_config.apicid = i; 2097 ioapics[ioapic_idx].mp_config.apicid = i;
2094 } else { 2098 } else {
2095 physid_mask_t tmp; 2099 physid_mask_t tmp;
2096 apic->apicid_to_cpu_present(mpc_ioapic_id(apic_id), 2100 apic->apicid_to_cpu_present(mpc_ioapic_id(ioapic_idx),
2097 &tmp); 2101 &tmp);
2098 apic_printk(APIC_VERBOSE, "Setting %d in the " 2102 apic_printk(APIC_VERBOSE, "Setting %d in the "
2099 "phys_id_present_map\n", 2103 "phys_id_present_map\n",
2100 mpc_ioapic_id(apic_id)); 2104 mpc_ioapic_id(ioapic_idx));
2101 physids_or(phys_id_present_map, phys_id_present_map, tmp); 2105 physids_or(phys_id_present_map, phys_id_present_map, tmp);
2102 } 2106 }
2103 2107
@@ -2105,35 +2109,35 @@ void __init setup_ioapic_ids_from_mpc_nocheck(void)
2105 * We need to adjust the IRQ routing table 2109 * We need to adjust the IRQ routing table
2106 * if the ID changed. 2110 * if the ID changed.
2107 */ 2111 */
2108 if (old_id != mpc_ioapic_id(apic_id)) 2112 if (old_id != mpc_ioapic_id(ioapic_idx))
2109 for (i = 0; i < mp_irq_entries; i++) 2113 for (i = 0; i < mp_irq_entries; i++)
2110 if (mp_irqs[i].dstapic == old_id) 2114 if (mp_irqs[i].dstapic == old_id)
2111 mp_irqs[i].dstapic 2115 mp_irqs[i].dstapic
2112 = mpc_ioapic_id(apic_id); 2116 = mpc_ioapic_id(ioapic_idx);
2113 2117
2114 /* 2118 /*
2115 * Update the ID register according to the right value 2119 * Update the ID register according to the right value
2116 * from the MPC table if they are different. 2120 * from the MPC table if they are different.
2117 */ 2121 */
2118 if (mpc_ioapic_id(apic_id) == reg_00.bits.ID) 2122 if (mpc_ioapic_id(ioapic_idx) == reg_00.bits.ID)
2119 continue; 2123 continue;
2120 2124
2121 apic_printk(APIC_VERBOSE, KERN_INFO 2125 apic_printk(APIC_VERBOSE, KERN_INFO
2122 "...changing IO-APIC physical APIC ID to %d ...", 2126 "...changing IO-APIC physical APIC ID to %d ...",
2123 mpc_ioapic_id(apic_id)); 2127 mpc_ioapic_id(ioapic_idx));
2124 2128
2125 reg_00.bits.ID = mpc_ioapic_id(apic_id); 2129 reg_00.bits.ID = mpc_ioapic_id(ioapic_idx);
2126 raw_spin_lock_irqsave(&ioapic_lock, flags); 2130 raw_spin_lock_irqsave(&ioapic_lock, flags);
2127 io_apic_write(apic_id, 0, reg_00.raw); 2131 io_apic_write(ioapic_idx, 0, reg_00.raw);
2128 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2132 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2129 2133
2130 /* 2134 /*
2131 * Sanity check 2135 * Sanity check
2132 */ 2136 */
2133 raw_spin_lock_irqsave(&ioapic_lock, flags); 2137 raw_spin_lock_irqsave(&ioapic_lock, flags);
2134 reg_00.raw = io_apic_read(apic_id, 0); 2138 reg_00.raw = io_apic_read(ioapic_idx, 0);
2135 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2139 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2136 if (reg_00.bits.ID != mpc_ioapic_id(apic_id)) 2140 if (reg_00.bits.ID != mpc_ioapic_id(ioapic_idx))
2137 printk("could not set ID!\n"); 2141 printk("could not set ID!\n");
2138 else 2142 else
2139 apic_printk(APIC_VERBOSE, " ok.\n"); 2143 apic_printk(APIC_VERBOSE, " ok.\n");
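The tail of this hunk uses a write-then-read-back sanity check on the IO-APIC ID register. A user-space sketch of that verify pattern against a fake register; the bitfield layout here is simplified and is not the real IO_APIC_reg_00:

/*
 * Write the desired ID, read the register back, complain on mismatch.
 */
#include <stdio.h>

union reg_00 {                                  /* simplified, not the real layout */
        struct { unsigned int reserved:24, ID:8; } bits;
        unsigned int raw;
};

static unsigned int fake_hw_reg;                /* MMIO register stand-in */

static void reg_write(unsigned int v) { fake_hw_reg = v; }
static unsigned int reg_read(void)    { return fake_hw_reg; }

int main(void)
{
        union reg_00 reg = { .raw = reg_read() };

        reg.bits.ID = 5;                        /* ID wanted by the MPC table */
        reg_write(reg.raw);

        reg.raw = reg_read();                   /* sanity check */
        if (reg.bits.ID != 5)
                printf("could not set ID!\n");
        else
                printf(" ok.\n");
        return 0;
}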
@@ -3001,27 +3005,26 @@ static int __init io_apic_bug_finalize(void)
3001 3005
3002late_initcall(io_apic_bug_finalize); 3006late_initcall(io_apic_bug_finalize);
3003 3007
3004static void resume_ioapic_id(int ioapic_id) 3008static void resume_ioapic_id(int ioapic_idx)
3005{ 3009{
3006 unsigned long flags; 3010 unsigned long flags;
3007 union IO_APIC_reg_00 reg_00; 3011 union IO_APIC_reg_00 reg_00;
3008 3012
3009
3010 raw_spin_lock_irqsave(&ioapic_lock, flags); 3013 raw_spin_lock_irqsave(&ioapic_lock, flags);
3011 reg_00.raw = io_apic_read(ioapic_id, 0); 3014 reg_00.raw = io_apic_read(ioapic_idx, 0);
3012 if (reg_00.bits.ID != mpc_ioapic_id(ioapic_id)) { 3015 if (reg_00.bits.ID != mpc_ioapic_id(ioapic_idx)) {
3013 reg_00.bits.ID = mpc_ioapic_id(ioapic_id); 3016 reg_00.bits.ID = mpc_ioapic_id(ioapic_idx);
3014 io_apic_write(ioapic_id, 0, reg_00.raw); 3017 io_apic_write(ioapic_idx, 0, reg_00.raw);
3015 } 3018 }
3016 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 3019 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
3017} 3020}
3018 3021
3019static void ioapic_resume(void) 3022static void ioapic_resume(void)
3020{ 3023{
3021 int ioapic_id; 3024 int ioapic_idx;
3022 3025
3023 for (ioapic_id = nr_ioapics - 1; ioapic_id >= 0; ioapic_id--) 3026 for (ioapic_idx = nr_ioapics - 1; ioapic_idx >= 0; ioapic_idx--)
3024 resume_ioapic_id(ioapic_id); 3027 resume_ioapic_id(ioapic_idx);
3025 3028
3026 restore_ioapic_entries(); 3029 restore_ioapic_entries();
3027} 3030}
@@ -3558,26 +3561,25 @@ io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr)
3558 return -EINVAL; 3561 return -EINVAL;
3559 ret = __add_pin_to_irq_node(cfg, node, attr->ioapic, attr->ioapic_pin); 3562 ret = __add_pin_to_irq_node(cfg, node, attr->ioapic, attr->ioapic_pin);
3560 if (!ret) 3563 if (!ret)
3561 setup_ioapic_irq(attr->ioapic, attr->ioapic_pin, irq, cfg, 3564 setup_ioapic_irq(irq, cfg, attr);
3562 attr->trigger, attr->polarity);
3563 return ret; 3565 return ret;
3564} 3566}
3565 3567
3566int io_apic_setup_irq_pin_once(unsigned int irq, int node, 3568int io_apic_setup_irq_pin_once(unsigned int irq, int node,
3567 struct io_apic_irq_attr *attr) 3569 struct io_apic_irq_attr *attr)
3568{ 3570{
3569 unsigned int id = attr->ioapic, pin = attr->ioapic_pin; 3571 unsigned int ioapic_idx = attr->ioapic, pin = attr->ioapic_pin;
3570 int ret; 3572 int ret;
3571 3573
3572 /* Avoid redundant programming */ 3574 /* Avoid redundant programming */
3573 if (test_bit(pin, ioapics[id].pin_programmed)) { 3575 if (test_bit(pin, ioapics[ioapic_idx].pin_programmed)) {
3574 pr_debug("Pin %d-%d already programmed\n", 3576 pr_debug("Pin %d-%d already programmed\n",
3575 mpc_ioapic_id(id), pin); 3577 mpc_ioapic_id(ioapic_idx), pin);
3576 return 0; 3578 return 0;
3577 } 3579 }
3578 ret = io_apic_setup_irq_pin(irq, node, attr); 3580 ret = io_apic_setup_irq_pin(irq, node, attr);
3579 if (!ret) 3581 if (!ret)
3580 set_bit(pin, ioapics[id].pin_programmed); 3582 set_bit(pin, ioapics[ioapic_idx].pin_programmed);
3581 return ret; 3583 return ret;
3582} 3584}
3583 3585
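The pin_programmed bitmap above makes repeated setup calls idempotent: the first call routes the pin and sets the bit, later calls return early. A plain-C stand-in for the test_bit()/set_bit() pair; the 64-pin limit is an artifact of the sketch:

/*
 * Program-once guard over a bitmap, mirroring io_apic_setup_irq_pin_once().
 */
#include <stdio.h>

static unsigned long pin_programmed;            /* one bit per pin; pin < 64 here */

static int setup_pin_once(unsigned int pin)
{
        if (pin_programmed & (1UL << pin)) {
                printf("pin %u already programmed\n", pin);
                return 0;
        }
        /* ... program the routing entry here ... */
        pin_programmed |= 1UL << pin;
        printf("pin %u programmed\n", pin);
        return 0;
}

int main(void)
{
        setup_pin_once(9);
        setup_pin_once(9);                      /* second call hits the guard */
        return 0;
}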
@@ -3613,7 +3615,6 @@ int get_nr_irqs_gsi(void)
3613 return nr_irqs_gsi; 3615 return nr_irqs_gsi;
3614} 3616}
3615 3617
3616#ifdef CONFIG_SPARSE_IRQ
3617int __init arch_probe_nr_irqs(void) 3618int __init arch_probe_nr_irqs(void)
3618{ 3619{
3619 int nr; 3620 int nr;
@@ -3633,7 +3634,6 @@ int __init arch_probe_nr_irqs(void)
3633 3634
3634 return NR_IRQS_LEGACY; 3635 return NR_IRQS_LEGACY;
3635} 3636}
3636#endif
3637 3637
3638int io_apic_set_pci_routing(struct device *dev, int irq, 3638int io_apic_set_pci_routing(struct device *dev, int irq,
3639 struct io_apic_irq_attr *irq_attr) 3639 struct io_apic_irq_attr *irq_attr)
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
index b5254ad044ab..0787bb3412f4 100644
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -200,14 +200,8 @@ void __init default_setup_apic_routing(void)
200 * - we find more than 8 CPUs in acpi LAPIC listing with xAPIC support 200 * - we find more than 8 CPUs in acpi LAPIC listing with xAPIC support
201 */ 201 */
202 202
203 if (!cmdline_apic && apic == &apic_default) { 203 if (!cmdline_apic && apic == &apic_default)
204 struct apic *bigsmp = generic_bigsmp_probe(); 204 generic_bigsmp_probe();
205 if (bigsmp) {
206 apic = bigsmp;
207 printk(KERN_INFO "Overriding APIC driver with %s\n",
208 apic->name);
209 }
210 }
211#endif 205#endif
212 206
213 if (apic->setup_apic_routing) 207 if (apic->setup_apic_routing)
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 34b18594e724..75be00ecfff2 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -672,18 +672,11 @@ void __cpuinit uv_cpu_init(void)
672/* 672/*
673 * When NMI is received, print a stack trace. 673 * When NMI is received, print a stack trace.
674 */ 674 */
675int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data) 675int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
676{ 676{
677 unsigned long real_uv_nmi; 677 unsigned long real_uv_nmi;
678 int bid; 678 int bid;
679 679
680 if (reason != DIE_NMIUNKNOWN)
681 return NOTIFY_OK;
682
683 if (in_crash_kexec)
684 /* do nothing if entering the crash kernel */
685 return NOTIFY_OK;
686
687 /* 680 /*
688 * Each blade has an MMR that indicates when an NMI has been sent 681 * Each blade has an MMR that indicates when an NMI has been sent
689 * to cpus on the blade. If an NMI is detected, atomically 682 * to cpus on the blade. If an NMI is detected, atomically
@@ -704,7 +697,7 @@ int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
704 } 697 }
705 698
706 if (likely(__get_cpu_var(cpu_last_nmi_count) == uv_blade_info[bid].nmi_count)) 699 if (likely(__get_cpu_var(cpu_last_nmi_count) == uv_blade_info[bid].nmi_count))
707 return NOTIFY_DONE; 700 return NMI_DONE;
708 701
709 __get_cpu_var(cpu_last_nmi_count) = uv_blade_info[bid].nmi_count; 702 __get_cpu_var(cpu_last_nmi_count) = uv_blade_info[bid].nmi_count;
710 703
@@ -717,17 +710,12 @@ int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
717 dump_stack(); 710 dump_stack();
718 spin_unlock(&uv_nmi_lock); 711 spin_unlock(&uv_nmi_lock);
719 712
720 return NOTIFY_STOP; 713 return NMI_HANDLED;
721} 714}
722 715
723static struct notifier_block uv_dump_stack_nmi_nb = {
724 .notifier_call = uv_handle_nmi,
725 .priority = NMI_LOCAL_LOW_PRIOR - 1,
726};
727
728void uv_register_nmi_notifier(void) 716void uv_register_nmi_notifier(void)
729{ 717{
730 if (register_die_notifier(&uv_dump_stack_nmi_nb)) 718 if (register_nmi_handler(NMI_UNKNOWN, uv_handle_nmi, 0, "uv"))
731 printk(KERN_WARNING "UV NMI handler failed to register\n"); 719 printk(KERN_WARNING "UV NMI handler failed to register\n");
732} 720}
733 721
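This conversion is the template for the rest of the series: the old (self, reason, data) die-notifier becomes a (type, regs) callback registered via register_nmi_handler(), returning NMI_DONE to pass the NMI along or NMI_HANDLED to claim it. A reduced sketch of the new handler shape; the per-blade MMR read is faked:

/*
 * Claim-or-pass NMI handler shape under the new API.
 */
#include <stdio.h>

enum { NMI_DONE = 0, NMI_HANDLED = 1 };

struct pt_regs;                                 /* opaque in this sketch */

static int nmi_pending = 1;                     /* stand-in for the per-blade MMR */

static int uv_style_handler(unsigned int type, struct pt_regs *regs)
{
        (void)type; (void)regs;

        if (!nmi_pending)
                return NMI_DONE;                /* not ours; let others look */

        nmi_pending = 0;
        printf("dump_stack() would run here\n");
        return NMI_HANDLED;                     /* claimed: no 'unknown NMI' splat */
}

int main(void)
{
        printf("first NMI:  %d\n", uv_style_handler(0, NULL));
        printf("second NMI: %d\n", uv_style_handler(0, NULL));
        return 0;
}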
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 6042981d0309..fe6eb197f848 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -28,10 +28,15 @@ obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o
28 28
29obj-$(CONFIG_PERF_EVENTS) += perf_event.o 29obj-$(CONFIG_PERF_EVENTS) += perf_event.o
30 30
31ifdef CONFIG_PERF_EVENTS
32obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd.o
33obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_p4.o perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
34endif
35
31obj-$(CONFIG_X86_MCE) += mcheck/ 36obj-$(CONFIG_X86_MCE) += mcheck/
32obj-$(CONFIG_MTRR) += mtrr/ 37obj-$(CONFIG_MTRR) += mtrr/
33 38
34obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o 39obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o perf_event_amd_ibs.o
35 40
36quiet_cmd_mkcapflags = MKCAP $@ 41quiet_cmd_mkcapflags = MKCAP $@
37 cmd_mkcapflags = $(PERL) $(srctree)/$(src)/mkcapflags.pl $< $@ 42 cmd_mkcapflags = $(PERL) $(srctree)/$(src)/mkcapflags.pl $< $@
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
index 0ed633c5048b..6199232161cf 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -78,27 +78,20 @@ static void raise_exception(struct mce *m, struct pt_regs *pregs)
78 78
79static cpumask_var_t mce_inject_cpumask; 79static cpumask_var_t mce_inject_cpumask;
80 80
81static int mce_raise_notify(struct notifier_block *self, 81static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs)
82 unsigned long val, void *data)
83{ 82{
84 struct die_args *args = (struct die_args *)data;
85 int cpu = smp_processor_id(); 83 int cpu = smp_processor_id();
86 struct mce *m = &__get_cpu_var(injectm); 84 struct mce *m = &__get_cpu_var(injectm);
87 if (val != DIE_NMI || !cpumask_test_cpu(cpu, mce_inject_cpumask)) 85 if (!cpumask_test_cpu(cpu, mce_inject_cpumask))
88 return NOTIFY_DONE; 86 return NMI_DONE;
89 cpumask_clear_cpu(cpu, mce_inject_cpumask); 87 cpumask_clear_cpu(cpu, mce_inject_cpumask);
90 if (m->inject_flags & MCJ_EXCEPTION) 88 if (m->inject_flags & MCJ_EXCEPTION)
91 raise_exception(m, args->regs); 89 raise_exception(m, regs);
92 else if (m->status) 90 else if (m->status)
93 raise_poll(m); 91 raise_poll(m);
94 return NOTIFY_STOP; 92 return NMI_HANDLED;
95} 93}
96 94
97static struct notifier_block mce_raise_nb = {
98 .notifier_call = mce_raise_notify,
99 .priority = NMI_LOCAL_NORMAL_PRIOR,
100};
101
102/* Inject mce on current CPU */ 95/* Inject mce on current CPU */
103static int raise_local(void) 96static int raise_local(void)
104{ 97{
@@ -216,7 +209,8 @@ static int inject_init(void)
216 return -ENOMEM; 209 return -ENOMEM;
217 printk(KERN_INFO "Machine check injector initialized\n"); 210 printk(KERN_INFO "Machine check injector initialized\n");
218 mce_chrdev_ops.write = mce_write; 211 mce_chrdev_ops.write = mce_write;
219 register_die_notifier(&mce_raise_nb); 212 register_nmi_handler(NMI_LOCAL, mce_raise_notify, 0,
213 "mce_notify");
220 return 0; 214 return 0;
221} 215}
222 216
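mce_raise_notify() above uses a cpumask as the hand-off between the injector and the NMI path: only CPUs whose bit is set claim the NMI, and each clears its own bit so the injection fires exactly once per target. A sketch with a flat bitmask and made-up CPU numbers:

/*
 * Targeted NMI claim via a shared mask, mirroring the mce-inject flow.
 */
#include <stdio.h>

enum { NMI_DONE = 0, NMI_HANDLED = 1 };

static unsigned long inject_mask;               /* mce_inject_cpumask stand-in */

static int raise_notify(int cpu)
{
        if (!(inject_mask & (1UL << cpu)))
                return NMI_DONE;                /* this CPU was not targeted */
        inject_mask &= ~(1UL << cpu);           /* claim exactly once */
        printf("cpu %d: raising injected MCE\n", cpu);
        return NMI_HANDLED;
}

int main(void)
{
        int r1, r2;

        inject_mask = 1UL << 2;                 /* target CPU 2 only */
        r1 = raise_notify(1);
        r2 = raise_notify(2);
        printf("cpu 1 -> %d, cpu 2 -> %d\n", r1, r2);
        return 0;
}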
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 08363b042122..fce51ad1f362 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -908,9 +908,6 @@ void do_machine_check(struct pt_regs *regs, long error_code)
908 908
909 percpu_inc(mce_exception_count); 909 percpu_inc(mce_exception_count);
910 910
911 if (notify_die(DIE_NMI, "machine check", regs, error_code,
912 18, SIGKILL) == NOTIFY_STOP)
913 goto out;
914 if (!banks) 911 if (!banks)
915 goto out; 912 goto out;
916 913
@@ -1140,6 +1137,15 @@ static void mce_start_timer(unsigned long data)
1140 add_timer_on(t, smp_processor_id()); 1137 add_timer_on(t, smp_processor_id());
1141} 1138}
1142 1139
1140/* Must not be called in IRQ context where del_timer_sync() can deadlock */
1141static void mce_timer_delete_all(void)
1142{
1143 int cpu;
1144
1145 for_each_online_cpu(cpu)
1146 del_timer_sync(&per_cpu(mce_timer, cpu));
1147}
1148
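The new helper exists because del_timer_sync() can deadlock when called from the IRQ context its timer handler runs in, so timer teardown is hoisted into process context before the per-CPU work is broadcast (mce_restart() below calls mce_timer_delete_all() first, then on_each_cpu()). An ordering sketch with stand-in functions:

/*
 * Safe ordering: synchronous timer cancel in process context,
 * then the per-CPU reinit that may run from IPI/IRQ context.
 */
#include <stdio.h>

static void delete_all_timers(void)             /* process context only */
{
        printf("del_timer_sync() on every CPU's timer\n");
}

static void per_cpu_reinit(void)                /* would run from an IPI */
{
        printf("reinit MCE banks on this CPU\n");
}

static void restart(void)                       /* mce_restart() shape */
{
        delete_all_timers();                    /* 1: sync cancel, safely */
        per_cpu_reinit();                       /* 2: on_each_cpu(...) stand-in */
}

int main(void)
{
        restart();
        return 0;
}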
1143static void mce_do_trigger(struct work_struct *work) 1149static void mce_do_trigger(struct work_struct *work)
1144{ 1150{
1145 call_usermodehelper(mce_helper, mce_helper_argv, NULL, UMH_NO_WAIT); 1151 call_usermodehelper(mce_helper, mce_helper_argv, NULL, UMH_NO_WAIT);
@@ -1750,7 +1756,6 @@ static struct syscore_ops mce_syscore_ops = {
1750 1756
1751static void mce_cpu_restart(void *data) 1757static void mce_cpu_restart(void *data)
1752{ 1758{
1753 del_timer_sync(&__get_cpu_var(mce_timer));
1754 if (!mce_available(__this_cpu_ptr(&cpu_info))) 1759 if (!mce_available(__this_cpu_ptr(&cpu_info)))
1755 return; 1760 return;
1756 __mcheck_cpu_init_generic(); 1761 __mcheck_cpu_init_generic();
@@ -1760,16 +1765,15 @@ static void mce_cpu_restart(void *data)
1760/* Reinit MCEs after user configuration changes */ 1765/* Reinit MCEs after user configuration changes */
1761static void mce_restart(void) 1766static void mce_restart(void)
1762{ 1767{
1768 mce_timer_delete_all();
1763 on_each_cpu(mce_cpu_restart, NULL, 1); 1769 on_each_cpu(mce_cpu_restart, NULL, 1);
1764} 1770}
1765 1771
1766/* Toggle features for corrected errors */ 1772/* Toggle features for corrected errors */
1767static void mce_disable_ce(void *all) 1773static void mce_disable_cmci(void *data)
1768{ 1774{
1769 if (!mce_available(__this_cpu_ptr(&cpu_info))) 1775 if (!mce_available(__this_cpu_ptr(&cpu_info)))
1770 return; 1776 return;
1771 if (all)
1772 del_timer_sync(&__get_cpu_var(mce_timer));
1773 cmci_clear(); 1777 cmci_clear();
1774} 1778}
1775 1779
@@ -1852,7 +1856,8 @@ static ssize_t set_ignore_ce(struct sys_device *s,
1852 if (mce_ignore_ce ^ !!new) { 1856 if (mce_ignore_ce ^ !!new) {
1853 if (new) { 1857 if (new) {
1854 /* disable ce features */ 1858 /* disable ce features */
1855 on_each_cpu(mce_disable_ce, (void *)1, 1); 1859 mce_timer_delete_all();
1860 on_each_cpu(mce_disable_cmci, NULL, 1);
1856 mce_ignore_ce = 1; 1861 mce_ignore_ce = 1;
1857 } else { 1862 } else {
1858 /* enable ce features */ 1863 /* enable ce features */
@@ -1875,7 +1880,7 @@ static ssize_t set_cmci_disabled(struct sys_device *s,
1875 if (mce_cmci_disabled ^ !!new) { 1880 if (mce_cmci_disabled ^ !!new) {
1876 if (new) { 1881 if (new) {
1877 /* disable cmci */ 1882 /* disable cmci */
1878 on_each_cpu(mce_disable_ce, NULL, 1); 1883 on_each_cpu(mce_disable_cmci, NULL, 1);
1879 mce_cmci_disabled = 1; 1884 mce_cmci_disabled = 1;
1880 } else { 1885 } else {
1881 /* enable cmci */ 1886 /* enable cmci */
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index cfa62ec090ec..640891014b2a 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -32,6 +32,8 @@
32#include <asm/smp.h> 32#include <asm/smp.h>
33#include <asm/alternative.h> 33#include <asm/alternative.h>
34 34
35#include "perf_event.h"
36
35#if 0 37#if 0
36#undef wrmsrl 38#undef wrmsrl
37#define wrmsrl(msr, val) \ 39#define wrmsrl(msr, val) \
@@ -43,283 +45,17 @@ do { \
43} while (0) 45} while (0)
44#endif 46#endif
45 47
46/* 48struct x86_pmu x86_pmu __read_mostly;
47 * | NHM/WSM | SNB |
48 * register -------------------------------
49 * | HT | no HT | HT | no HT |
50 *-----------------------------------------
51 * offcore | core | core | cpu | core |
52 * lbr_sel | core | core | cpu | core |
53 * ld_lat | cpu | core | cpu | core |
54 *-----------------------------------------
55 *
56 * Given that there is a small number of shared regs,
57 * we can pre-allocate their slot in the per-cpu
58 * per-core reg tables.
59 */
60enum extra_reg_type {
61 EXTRA_REG_NONE = -1, /* not used */
62
63 EXTRA_REG_RSP_0 = 0, /* offcore_response_0 */
64 EXTRA_REG_RSP_1 = 1, /* offcore_response_1 */
65
66 EXTRA_REG_MAX /* number of entries needed */
67};
68
69struct event_constraint {
70 union {
71 unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
72 u64 idxmsk64;
73 };
74 u64 code;
75 u64 cmask;
76 int weight;
77};
78
79struct amd_nb {
80 int nb_id; /* NorthBridge id */
81 int refcnt; /* reference count */
82 struct perf_event *owners[X86_PMC_IDX_MAX];
83 struct event_constraint event_constraints[X86_PMC_IDX_MAX];
84};
85
86struct intel_percore;
87
88#define MAX_LBR_ENTRIES 16
89
90struct cpu_hw_events {
91 /*
92 * Generic x86 PMC bits
93 */
94 struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */
95 unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
96 unsigned long running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
97 int enabled;
98
99 int n_events;
100 int n_added;
101 int n_txn;
102 int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
103 u64 tags[X86_PMC_IDX_MAX];
104 struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
105
106 unsigned int group_flag;
107
108 /*
109 * Intel DebugStore bits
110 */
111 struct debug_store *ds;
112 u64 pebs_enabled;
113
114 /*
115 * Intel LBR bits
116 */
117 int lbr_users;
118 void *lbr_context;
119 struct perf_branch_stack lbr_stack;
120 struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES];
121
122 /*
123 * manage shared (per-core, per-cpu) registers
124 * used on Intel NHM/WSM/SNB
125 */
126 struct intel_shared_regs *shared_regs;
127
128 /*
129 * AMD specific bits
130 */
131 struct amd_nb *amd_nb;
132};
133
134#define __EVENT_CONSTRAINT(c, n, m, w) {\
135 { .idxmsk64 = (n) }, \
136 .code = (c), \
137 .cmask = (m), \
138 .weight = (w), \
139}
140
141#define EVENT_CONSTRAINT(c, n, m) \
142 __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))
143
144/*
145 * Constraint on the Event code.
146 */
147#define INTEL_EVENT_CONSTRAINT(c, n) \
148 EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
149
150/*
151 * Constraint on the Event code + UMask + fixed-mask
152 *
153 * filter mask to validate fixed counter events.
154 * the following filters disqualify for fixed counters:
155 * - inv
156 * - edge
157 * - cnt-mask
158 * The other filters are supported by fixed counters.
159 * The any-thread option is supported starting with v3.
160 */
161#define FIXED_EVENT_CONSTRAINT(c, n) \
162 EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK)
163
164/*
165 * Constraint on the Event code + UMask
166 */
167#define INTEL_UEVENT_CONSTRAINT(c, n) \
168 EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
169
170#define EVENT_CONSTRAINT_END \
171 EVENT_CONSTRAINT(0, 0, 0)
172
173#define for_each_event_constraint(e, c) \
174 for ((e) = (c); (e)->weight; (e)++)
175
176/*
177 * Per register state.
178 */
179struct er_account {
180 raw_spinlock_t lock; /* per-core: protect structure */
181 u64 config; /* extra MSR config */
182 u64 reg; /* extra MSR number */
183 atomic_t ref; /* reference count */
184};
185
186/*
187 * Extra registers for specific events.
188 *
189 * Some events need large masks and require external MSRs.
190 * Those extra MSRs end up being shared for all events on
191 * a PMU and sometimes between PMU of sibling HT threads.
192 * In either case, the kernel needs to handle conflicting
193 * accesses to those extra, shared, regs. The data structure
194 * to manage those registers is stored in cpu_hw_event.
195 */
196struct extra_reg {
197 unsigned int event;
198 unsigned int msr;
199 u64 config_mask;
200 u64 valid_mask;
201 int idx; /* per_xxx->regs[] reg index */
202};
203
204#define EVENT_EXTRA_REG(e, ms, m, vm, i) { \
205 .event = (e), \
206 .msr = (ms), \
207 .config_mask = (m), \
208 .valid_mask = (vm), \
209 .idx = EXTRA_REG_##i \
210 }
211
212#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \
213 EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)
214
215#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
216
217union perf_capabilities {
218 struct {
219 u64 lbr_format : 6;
220 u64 pebs_trap : 1;
221 u64 pebs_arch_reg : 1;
222 u64 pebs_format : 4;
223 u64 smm_freeze : 1;
224 };
225 u64 capabilities;
226};
227
228/*
229 * struct x86_pmu - generic x86 pmu
230 */
231struct x86_pmu {
232 /*
233 * Generic x86 PMC bits
234 */
235 const char *name;
236 int version;
237 int (*handle_irq)(struct pt_regs *);
238 void (*disable_all)(void);
239 void (*enable_all)(int added);
240 void (*enable)(struct perf_event *);
241 void (*disable)(struct perf_event *);
242 int (*hw_config)(struct perf_event *event);
243 int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
244 unsigned eventsel;
245 unsigned perfctr;
246 u64 (*event_map)(int);
247 int max_events;
248 int num_counters;
249 int num_counters_fixed;
250 int cntval_bits;
251 u64 cntval_mask;
252 int apic;
253 u64 max_period;
254 struct event_constraint *
255 (*get_event_constraints)(struct cpu_hw_events *cpuc,
256 struct perf_event *event);
257
258 void (*put_event_constraints)(struct cpu_hw_events *cpuc,
259 struct perf_event *event);
260 struct event_constraint *event_constraints;
261 void (*quirks)(void);
262 int perfctr_second_write;
263
264 int (*cpu_prepare)(int cpu);
265 void (*cpu_starting)(int cpu);
266 void (*cpu_dying)(int cpu);
267 void (*cpu_dead)(int cpu);
268
269 /*
270 * Intel Arch Perfmon v2+
271 */
272 u64 intel_ctrl;
273 union perf_capabilities intel_cap;
274 49
275 /* 50DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
276 * Intel DebugStore bits
277 */
278 int bts, pebs;
279 int bts_active, pebs_active;
280 int pebs_record_size;
281 void (*drain_pebs)(struct pt_regs *regs);
282 struct event_constraint *pebs_constraints;
283
284 /*
285 * Intel LBR
286 */
287 unsigned long lbr_tos, lbr_from, lbr_to; /* MSR base regs */
288 int lbr_nr; /* hardware stack size */
289
290 /*
291 * Extra registers for events
292 */
293 struct extra_reg *extra_regs;
294 unsigned int er_flags;
295};
296
297#define ERF_NO_HT_SHARING 1
298#define ERF_HAS_RSP_1 2
299
300static struct x86_pmu x86_pmu __read_mostly;
301
302static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
303 .enabled = 1, 51 .enabled = 1,
304}; 52};
305 53
306static int x86_perf_event_set_period(struct perf_event *event); 54u64 __read_mostly hw_cache_event_ids
307
308/*
309 * Generalized hw caching related hw_event table, filled
310 * in on a per model basis. A value of 0 means
311 * 'not supported', -1 means 'hw_event makes no sense on
312 * this CPU', any other value means the raw hw_event
313 * ID.
314 */
315
316#define C(x) PERF_COUNT_HW_CACHE_##x
317
318static u64 __read_mostly hw_cache_event_ids
319 [PERF_COUNT_HW_CACHE_MAX] 55 [PERF_COUNT_HW_CACHE_MAX]
320 [PERF_COUNT_HW_CACHE_OP_MAX] 56 [PERF_COUNT_HW_CACHE_OP_MAX]
321 [PERF_COUNT_HW_CACHE_RESULT_MAX]; 57 [PERF_COUNT_HW_CACHE_RESULT_MAX];
322static u64 __read_mostly hw_cache_extra_regs 58u64 __read_mostly hw_cache_extra_regs
323 [PERF_COUNT_HW_CACHE_MAX] 59 [PERF_COUNT_HW_CACHE_MAX]
324 [PERF_COUNT_HW_CACHE_OP_MAX] 60 [PERF_COUNT_HW_CACHE_OP_MAX]
325 [PERF_COUNT_HW_CACHE_RESULT_MAX]; 61 [PERF_COUNT_HW_CACHE_RESULT_MAX];
@@ -329,8 +65,7 @@ static u64 __read_mostly hw_cache_extra_regs
329 * Can only be executed on the CPU where the event is active. 65 * Can only be executed on the CPU where the event is active.
330 * Returns the delta events processed. 66 * Returns the delta events processed.
331 */ 67 */
332static u64 68u64 x86_perf_event_update(struct perf_event *event)
333x86_perf_event_update(struct perf_event *event)
334{ 69{
335 struct hw_perf_event *hwc = &event->hw; 70 struct hw_perf_event *hwc = &event->hw;
336 int shift = 64 - x86_pmu.cntval_bits; 71 int shift = 64 - x86_pmu.cntval_bits;
@@ -373,30 +108,6 @@ again:
373 return new_raw_count; 108 return new_raw_count;
374} 109}
375 110
376static inline int x86_pmu_addr_offset(int index)
377{
378 int offset;
379
380 /* offset = X86_FEATURE_PERFCTR_CORE ? index << 1 : index */
381 alternative_io(ASM_NOP2,
382 "shll $1, %%eax",
383 X86_FEATURE_PERFCTR_CORE,
384 "=a" (offset),
385 "a" (index));
386
387 return offset;
388}
389
390static inline unsigned int x86_pmu_config_addr(int index)
391{
392 return x86_pmu.eventsel + x86_pmu_addr_offset(index);
393}
394
395static inline unsigned int x86_pmu_event_addr(int index)
396{
397 return x86_pmu.perfctr + x86_pmu_addr_offset(index);
398}
399
400/* 111/*
401 * Find and validate any extra registers to set up. 112 * Find and validate any extra registers to set up.
402 */ 113 */
@@ -532,9 +243,6 @@ msr_fail:
532 return false; 243 return false;
533} 244}
534 245
535static void reserve_ds_buffers(void);
536static void release_ds_buffers(void);
537
538static void hw_perf_event_destroy(struct perf_event *event) 246static void hw_perf_event_destroy(struct perf_event *event)
539{ 247{
540 if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) { 248 if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
@@ -583,7 +291,7 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
583 return x86_pmu_extra_regs(val, event); 291 return x86_pmu_extra_regs(val, event);
584} 292}
585 293
586static int x86_setup_perfctr(struct perf_event *event) 294int x86_setup_perfctr(struct perf_event *event)
587{ 295{
588 struct perf_event_attr *attr = &event->attr; 296 struct perf_event_attr *attr = &event->attr;
589 struct hw_perf_event *hwc = &event->hw; 297 struct hw_perf_event *hwc = &event->hw;
@@ -647,7 +355,7 @@ static int x86_setup_perfctr(struct perf_event *event)
647 return 0; 355 return 0;
648} 356}
649 357
650static int x86_pmu_hw_config(struct perf_event *event) 358int x86_pmu_hw_config(struct perf_event *event)
651{ 359{
652 if (event->attr.precise_ip) { 360 if (event->attr.precise_ip) {
653 int precise = 0; 361 int precise = 0;
@@ -723,7 +431,7 @@ static int __x86_pmu_event_init(struct perf_event *event)
723 return x86_pmu.hw_config(event); 431 return x86_pmu.hw_config(event);
724} 432}
725 433
726static void x86_pmu_disable_all(void) 434void x86_pmu_disable_all(void)
727{ 435{
728 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 436 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
729 int idx; 437 int idx;
@@ -758,15 +466,7 @@ static void x86_pmu_disable(struct pmu *pmu)
758 x86_pmu.disable_all(); 466 x86_pmu.disable_all();
759} 467}
760 468
761static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, 469void x86_pmu_enable_all(int added)
762 u64 enable_mask)
763{
764 if (hwc->extra_reg.reg)
765 wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
766 wrmsrl(hwc->config_base, hwc->config | enable_mask);
767}
768
769static void x86_pmu_enable_all(int added)
770{ 470{
771 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 471 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
772 int idx; 472 int idx;
@@ -788,7 +488,7 @@ static inline int is_x86_event(struct perf_event *event)
788 return event->pmu == &pmu; 488 return event->pmu == &pmu;
789} 489}
790 490
791static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) 491int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
792{ 492{
793 struct event_constraint *c, *constraints[X86_PMC_IDX_MAX]; 493 struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
794 unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; 494 unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
@@ -959,7 +659,6 @@ static inline int match_prev_assignment(struct hw_perf_event *hwc,
959} 659}
960 660
961static void x86_pmu_start(struct perf_event *event, int flags); 661static void x86_pmu_start(struct perf_event *event, int flags);
962static void x86_pmu_stop(struct perf_event *event, int flags);
963 662
964static void x86_pmu_enable(struct pmu *pmu) 663static void x86_pmu_enable(struct pmu *pmu)
965{ 664{
@@ -1031,21 +730,13 @@ static void x86_pmu_enable(struct pmu *pmu)
1031 x86_pmu.enable_all(added); 730 x86_pmu.enable_all(added);
1032} 731}
1033 732
1034static inline void x86_pmu_disable_event(struct perf_event *event)
1035{
1036 struct hw_perf_event *hwc = &event->hw;
1037
1038 wrmsrl(hwc->config_base, hwc->config);
1039}
1040
1041static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left); 733static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
1042 734
1043/* 735/*
1044 * Set the next IRQ period, based on the hwc->period_left value. 736 * Set the next IRQ period, based on the hwc->period_left value.
1045 * To be called with the event disabled in hw: 737 * To be called with the event disabled in hw:
1046 */ 738 */
1047static int 739int x86_perf_event_set_period(struct perf_event *event)
1048x86_perf_event_set_period(struct perf_event *event)
1049{ 740{
1050 struct hw_perf_event *hwc = &event->hw; 741 struct hw_perf_event *hwc = &event->hw;
1051 s64 left = local64_read(&hwc->period_left); 742 s64 left = local64_read(&hwc->period_left);
@@ -1105,7 +796,7 @@ x86_perf_event_set_period(struct perf_event *event)
1105 return ret; 796 return ret;
1106} 797}
1107 798
1108static void x86_pmu_enable_event(struct perf_event *event) 799void x86_pmu_enable_event(struct perf_event *event)
1109{ 800{
1110 if (__this_cpu_read(cpu_hw_events.enabled)) 801 if (__this_cpu_read(cpu_hw_events.enabled))
1111 __x86_pmu_enable_event(&event->hw, 802 __x86_pmu_enable_event(&event->hw,
@@ -1244,7 +935,7 @@ void perf_event_print_debug(void)
1244 local_irq_restore(flags); 935 local_irq_restore(flags);
1245} 936}
1246 937
1247static void x86_pmu_stop(struct perf_event *event, int flags) 938void x86_pmu_stop(struct perf_event *event, int flags)
1248{ 939{
1249 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 940 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1250 struct hw_perf_event *hwc = &event->hw; 941 struct hw_perf_event *hwc = &event->hw;
@@ -1297,7 +988,7 @@ static void x86_pmu_del(struct perf_event *event, int flags)
1297 perf_event_update_userpage(event); 988 perf_event_update_userpage(event);
1298} 989}
1299 990
1300static int x86_pmu_handle_irq(struct pt_regs *regs) 991int x86_pmu_handle_irq(struct pt_regs *regs)
1301{ 992{
1302 struct perf_sample_data data; 993 struct perf_sample_data data;
1303 struct cpu_hw_events *cpuc; 994 struct cpu_hw_events *cpuc;
@@ -1367,109 +1058,28 @@ void perf_events_lapic_init(void)
1367 apic_write(APIC_LVTPC, APIC_DM_NMI); 1058 apic_write(APIC_LVTPC, APIC_DM_NMI);
1368} 1059}
1369 1060
1370struct pmu_nmi_state {
1371 unsigned int marked;
1372 int handled;
1373};
1374
1375static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);
1376
1377static int __kprobes 1061static int __kprobes
1378perf_event_nmi_handler(struct notifier_block *self, 1062perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
1379 unsigned long cmd, void *__args)
1380{ 1063{
1381 struct die_args *args = __args;
1382 unsigned int this_nmi;
1383 int handled;
1384
1385 if (!atomic_read(&active_events)) 1064 if (!atomic_read(&active_events))
1386 return NOTIFY_DONE; 1065 return NMI_DONE;
1387
1388 switch (cmd) {
1389 case DIE_NMI:
1390 break;
1391 case DIE_NMIUNKNOWN:
1392 this_nmi = percpu_read(irq_stat.__nmi_count);
1393 if (this_nmi != __this_cpu_read(pmu_nmi.marked))
1394 /* let the kernel handle the unknown nmi */
1395 return NOTIFY_DONE;
1396 /*
1397 * This one is a PMU back-to-back nmi. Two events
1398 * trigger 'simultaneously' raising two back-to-back
1399 * NMIs. If the first NMI handles both, the latter
1400 * will be empty and daze the CPU. So, we drop it to
1401 * avoid false-positive 'unknown nmi' messages.
1402 */
1403 return NOTIFY_STOP;
1404 default:
1405 return NOTIFY_DONE;
1406 }
1407
1408 handled = x86_pmu.handle_irq(args->regs);
1409 if (!handled)
1410 return NOTIFY_DONE;
1411
1412 this_nmi = percpu_read(irq_stat.__nmi_count);
1413 if ((handled > 1) ||
1414 /* the next nmi could be a back-to-back nmi */
1415 ((__this_cpu_read(pmu_nmi.marked) == this_nmi) &&
1416 (__this_cpu_read(pmu_nmi.handled) > 1))) {
1417 /*
1418 * We could have two subsequent back-to-back nmis: The
1419 * first handles more than one counter, the 2nd
1420 * handles only one counter and the 3rd handles no
1421 * counter.
1422 *
1423 * This is the 2nd nmi because the previous was
1424 * handling more than one counter. We will mark the
1425 * next (3rd) and then drop it if unhandled.
1426 */
1427 __this_cpu_write(pmu_nmi.marked, this_nmi + 1);
1428 __this_cpu_write(pmu_nmi.handled, handled);
1429 }
1430 1066
1431 return NOTIFY_STOP; 1067 return x86_pmu.handle_irq(regs);
1432} 1068}
1433 1069
1434static __read_mostly struct notifier_block perf_event_nmi_notifier = { 1070struct event_constraint emptyconstraint;
1435 .notifier_call = perf_event_nmi_handler, 1071struct event_constraint unconstrained;
1436 .next = NULL,
1437 .priority = NMI_LOCAL_LOW_PRIOR,
1438};
1439
1440static struct event_constraint unconstrained;
1441static struct event_constraint emptyconstraint;
1442
1443static struct event_constraint *
1444x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
1445{
1446 struct event_constraint *c;
1447
1448 if (x86_pmu.event_constraints) {
1449 for_each_event_constraint(c, x86_pmu.event_constraints) {
1450 if ((event->hw.config & c->cmask) == c->code)
1451 return c;
1452 }
1453 }
1454
1455 return &unconstrained;
1456}
1457
1458#include "perf_event_amd.c"
1459#include "perf_event_p6.c"
1460#include "perf_event_p4.c"
1461#include "perf_event_intel_lbr.c"
1462#include "perf_event_intel_ds.c"
1463#include "perf_event_intel.c"
1464 1072
1465static int __cpuinit 1073static int __cpuinit
1466x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) 1074x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
1467{ 1075{
1468 unsigned int cpu = (long)hcpu; 1076 unsigned int cpu = (long)hcpu;
1077 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
1469 int ret = NOTIFY_OK; 1078 int ret = NOTIFY_OK;
1470 1079
1471 switch (action & ~CPU_TASKS_FROZEN) { 1080 switch (action & ~CPU_TASKS_FROZEN) {
1472 case CPU_UP_PREPARE: 1081 case CPU_UP_PREPARE:
1082 cpuc->kfree_on_online = NULL;
1473 if (x86_pmu.cpu_prepare) 1083 if (x86_pmu.cpu_prepare)
1474 ret = x86_pmu.cpu_prepare(cpu); 1084 ret = x86_pmu.cpu_prepare(cpu);
1475 break; 1085 break;
@@ -1479,6 +1089,10 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 		x86_pmu.cpu_starting(cpu);
 		break;
 
+	case CPU_ONLINE:
+		kfree(cpuc->kfree_on_online);
+		break;
+
 	case CPU_DYING:
 		if (x86_pmu.cpu_dying)
 			x86_pmu.cpu_dying(cpu);
@@ -1557,7 +1171,7 @@ static int __init init_hw_perf_events(void)
 		((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
 
 	perf_events_lapic_init();
-	register_die_notifier(&perf_event_nmi_notifier);
+	register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
 
 	unconstrained = (struct event_constraint)
 		__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
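
The back-to-back NMI bookkeeping deleted above is subtle enough to deserve a stand-alone illustration. Below is a minimal, compilable user-space model of that logic; the struct and function names are illustrative, not kernel API, and the per-cpu machinery is elided.

#include <stdio.h>

struct pmu_nmi_state {
	unsigned int marked;	/* NMI count after which an empty NMI is expected */
	int handled;		/* events handled by the marking NMI */
};

static struct pmu_nmi_state pmu_nmi;

/* After an NMI that handled 'handled' counters, mark the *next* NMI
 * (this_nmi + 1) as one that may legitimately be empty. */
static void nmi_mark_next(unsigned int this_nmi, int handled)
{
	if (handled > 1 ||
	    (pmu_nmi.marked == this_nmi && pmu_nmi.handled > 1)) {
		pmu_nmi.marked = this_nmi + 1;
		pmu_nmi.handled = handled;
	}
}

/* For an otherwise-unknown NMI: swallow it only if it was marked. */
static int nmi_swallow_unknown(unsigned int this_nmi)
{
	return this_nmi == pmu_nmi.marked;
}

int main(void)
{
	nmi_mark_next(41, 2);			 /* NMI 41 handled two counters */
	printf("%d\n", nmi_swallow_unknown(42)); /* 1: marked, drop quietly */
	printf("%d\n", nmi_swallow_unknown(43)); /* 0: genuinely unknown */
	return 0;
}
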
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
new file mode 100644
index 000000000000..b9698d40ac4b
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -0,0 +1,505 @@
1/*
2 * Performance events x86 architecture header
3 *
4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2009 Jaswinder Singh Rajput
7 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
8 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
9 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
10 * Copyright (C) 2009 Google, Inc., Stephane Eranian
11 *
12 * For licencing details see kernel-base/COPYING
13 */
14
15#include <linux/perf_event.h>
16
17/*
18 * | NHM/WSM | SNB |
19 * register -------------------------------
20 * | HT | no HT | HT | no HT |
21 *-----------------------------------------
22 * offcore | core | core | cpu | core |
23 * lbr_sel | core | core | cpu | core |
24 * ld_lat | cpu | core | cpu | core |
25 *-----------------------------------------
26 *
27 * Given that there is a small number of shared regs,
28 * we can pre-allocate their slot in the per-cpu
29 * per-core reg tables.
30 */
31enum extra_reg_type {
32 EXTRA_REG_NONE = -1, /* not used */
33
34 EXTRA_REG_RSP_0 = 0, /* offcore_response_0 */
35 EXTRA_REG_RSP_1 = 1, /* offcore_response_1 */
36
37 EXTRA_REG_MAX /* number of entries needed */
38};
39
40struct event_constraint {
41 union {
42 unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
43 u64 idxmsk64;
44 };
45 u64 code;
46 u64 cmask;
47 int weight;
48};
49
50struct amd_nb {
51 int nb_id; /* NorthBridge id */
52 int refcnt; /* reference count */
53 struct perf_event *owners[X86_PMC_IDX_MAX];
54 struct event_constraint event_constraints[X86_PMC_IDX_MAX];
55};
56
57/* The maximal number of PEBS events: */
58#define MAX_PEBS_EVENTS 4
59
60/*
61 * A debug store configuration.
62 *
63 * We only support architectures that use 64bit fields.
64 */
65struct debug_store {
66 u64 bts_buffer_base;
67 u64 bts_index;
68 u64 bts_absolute_maximum;
69 u64 bts_interrupt_threshold;
70 u64 pebs_buffer_base;
71 u64 pebs_index;
72 u64 pebs_absolute_maximum;
73 u64 pebs_interrupt_threshold;
74 u64 pebs_event_reset[MAX_PEBS_EVENTS];
75};
76
77/*
78 * Per register state.
79 */
80struct er_account {
81 raw_spinlock_t lock; /* per-core: protect structure */
82 u64 config; /* extra MSR config */
83 u64 reg; /* extra MSR number */
84 atomic_t ref; /* reference count */
85};
86
87/*
88 * Per core/cpu state
89 *
90 * Used to coordinate shared registers between HT threads or
91 * among events on a single PMU.
92 */
93struct intel_shared_regs {
94 struct er_account regs[EXTRA_REG_MAX];
95 int refcnt; /* per-core: #HT threads */
96 unsigned core_id; /* per-core: core id */
97};
98
99#define MAX_LBR_ENTRIES 16
100
101struct cpu_hw_events {
102 /*
103 * Generic x86 PMC bits
104 */
105 struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */
106 unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
107 unsigned long running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
108 int enabled;
109
110 int n_events;
111 int n_added;
112 int n_txn;
113 int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
114 u64 tags[X86_PMC_IDX_MAX];
115 struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
116
117 unsigned int group_flag;
118
119 /*
120 * Intel DebugStore bits
121 */
122 struct debug_store *ds;
123 u64 pebs_enabled;
124
125 /*
126 * Intel LBR bits
127 */
128 int lbr_users;
129 void *lbr_context;
130 struct perf_branch_stack lbr_stack;
131 struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES];
132
133 /*
134 * Intel host/guest exclude bits
135 */
136 u64 intel_ctrl_guest_mask;
137 u64 intel_ctrl_host_mask;
138 struct perf_guest_switch_msr guest_switch_msrs[X86_PMC_IDX_MAX];
139
140 /*
141 * manage shared (per-core, per-cpu) registers
142 * used on Intel NHM/WSM/SNB
143 */
144 struct intel_shared_regs *shared_regs;
145
146 /*
147 * AMD specific bits
148 */
149 struct amd_nb *amd_nb;
150
151 void *kfree_on_online;
152};
153
154#define __EVENT_CONSTRAINT(c, n, m, w) {\
155 { .idxmsk64 = (n) }, \
156 .code = (c), \
157 .cmask = (m), \
158 .weight = (w), \
159}
160
161#define EVENT_CONSTRAINT(c, n, m) \
162 __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))
163
164/*
165 * Constraint on the Event code.
166 */
167#define INTEL_EVENT_CONSTRAINT(c, n) \
168 EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
169
170/*
171 * Constraint on the Event code + UMask + fixed-mask
172 *
173 * filter mask to validate fixed counter events.
174 * the following filters disqualify for fixed counters:
175 * - inv
176 * - edge
177 * - cnt-mask
178 * The other filters are supported by fixed counters.
179 * The any-thread option is supported starting with v3.
180 */
181#define FIXED_EVENT_CONSTRAINT(c, n) \
182 EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK)
183
184/*
185 * Constraint on the Event code + UMask
186 */
187#define INTEL_UEVENT_CONSTRAINT(c, n) \
188 EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
189
190#define EVENT_CONSTRAINT_END \
191 EVENT_CONSTRAINT(0, 0, 0)
192
193#define for_each_event_constraint(e, c) \
194 for ((e) = (c); (e)->weight; (e)++)
195
196/*
197 * Extra registers for specific events.
198 *
199 * Some events need large masks and require external MSRs.
200 * Those extra MSRs end up being shared for all events on
201 * a PMU and sometimes between PMU of sibling HT threads.
202 * In either case, the kernel needs to handle conflicting
203 * accesses to those extra, shared, regs. The data structure
204 * to manage those registers is stored in cpu_hw_event.
205 */
206struct extra_reg {
207 unsigned int event;
208 unsigned int msr;
209 u64 config_mask;
210 u64 valid_mask;
211 int idx; /* per_xxx->regs[] reg index */
212};
213
214#define EVENT_EXTRA_REG(e, ms, m, vm, i) { \
215 .event = (e), \
216 .msr = (ms), \
217 .config_mask = (m), \
218 .valid_mask = (vm), \
219 .idx = EXTRA_REG_##i \
220 }
221
222#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \
223 EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)
224
225#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
226
227union perf_capabilities {
228 struct {
229 u64 lbr_format:6;
230 u64 pebs_trap:1;
231 u64 pebs_arch_reg:1;
232 u64 pebs_format:4;
233 u64 smm_freeze:1;
234 };
235 u64 capabilities;
236};
237
238/*
239 * struct x86_pmu - generic x86 pmu
240 */
241struct x86_pmu {
242 /*
243 * Generic x86 PMC bits
244 */
245 const char *name;
246 int version;
247 int (*handle_irq)(struct pt_regs *);
248 void (*disable_all)(void);
249 void (*enable_all)(int added);
250 void (*enable)(struct perf_event *);
251 void (*disable)(struct perf_event *);
252 int (*hw_config)(struct perf_event *event);
253 int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
254 unsigned eventsel;
255 unsigned perfctr;
256 u64 (*event_map)(int);
257 int max_events;
258 int num_counters;
259 int num_counters_fixed;
260 int cntval_bits;
261 u64 cntval_mask;
262 int apic;
263 u64 max_period;
264 struct event_constraint *
265 (*get_event_constraints)(struct cpu_hw_events *cpuc,
266 struct perf_event *event);
267
268 void (*put_event_constraints)(struct cpu_hw_events *cpuc,
269 struct perf_event *event);
270 struct event_constraint *event_constraints;
271 void (*quirks)(void);
272 int perfctr_second_write;
273
274 int (*cpu_prepare)(int cpu);
275 void (*cpu_starting)(int cpu);
276 void (*cpu_dying)(int cpu);
277 void (*cpu_dead)(int cpu);
278
279 /*
280 * Intel Arch Perfmon v2+
281 */
282 u64 intel_ctrl;
283 union perf_capabilities intel_cap;
284
285 /*
286 * Intel DebugStore bits
287 */
288 int bts, pebs;
289 int bts_active, pebs_active;
290 int pebs_record_size;
291 void (*drain_pebs)(struct pt_regs *regs);
292 struct event_constraint *pebs_constraints;
293
294 /*
295 * Intel LBR
296 */
297 unsigned long lbr_tos, lbr_from, lbr_to; /* MSR base regs */
298 int lbr_nr; /* hardware stack size */
299
300 /*
301 * Extra registers for events
302 */
303 struct extra_reg *extra_regs;
304 unsigned int er_flags;
305
306 /*
307 * Intel host/guest support (KVM)
308 */
309 struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
310};
311
312#define ERF_NO_HT_SHARING 1
313#define ERF_HAS_RSP_1 2
314
315extern struct x86_pmu x86_pmu __read_mostly;
316
317DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
318
319int x86_perf_event_set_period(struct perf_event *event);
320
321/*
322 * Generalized hw caching related hw_event table, filled
323 * in on a per model basis. A value of 0 means
324 * 'not supported', -1 means 'hw_event makes no sense on
325 * this CPU', any other value means the raw hw_event
326 * ID.
327 */
328
329#define C(x) PERF_COUNT_HW_CACHE_##x
330
331extern u64 __read_mostly hw_cache_event_ids
332 [PERF_COUNT_HW_CACHE_MAX]
333 [PERF_COUNT_HW_CACHE_OP_MAX]
334 [PERF_COUNT_HW_CACHE_RESULT_MAX];
335extern u64 __read_mostly hw_cache_extra_regs
336 [PERF_COUNT_HW_CACHE_MAX]
337 [PERF_COUNT_HW_CACHE_OP_MAX]
338 [PERF_COUNT_HW_CACHE_RESULT_MAX];
339
340u64 x86_perf_event_update(struct perf_event *event);
341
342static inline int x86_pmu_addr_offset(int index)
343{
344 int offset;
345
346 /* offset = X86_FEATURE_PERFCTR_CORE ? index << 1 : index */
347 alternative_io(ASM_NOP2,
348 "shll $1, %%eax",
349 X86_FEATURE_PERFCTR_CORE,
350 "=a" (offset),
351 "a" (index));
352
353 return offset;
354}
355
356static inline unsigned int x86_pmu_config_addr(int index)
357{
358 return x86_pmu.eventsel + x86_pmu_addr_offset(index);
359}
360
361static inline unsigned int x86_pmu_event_addr(int index)
362{
363 return x86_pmu.perfctr + x86_pmu_addr_offset(index);
364}
365
366int x86_setup_perfctr(struct perf_event *event);
367
368int x86_pmu_hw_config(struct perf_event *event);
369
370void x86_pmu_disable_all(void);
371
372static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
373 u64 enable_mask)
374{
375 if (hwc->extra_reg.reg)
376 wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
377 wrmsrl(hwc->config_base, hwc->config | enable_mask);
378}
379
380void x86_pmu_enable_all(int added);
381
382int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);
383
384void x86_pmu_stop(struct perf_event *event, int flags);
385
386static inline void x86_pmu_disable_event(struct perf_event *event)
387{
388 struct hw_perf_event *hwc = &event->hw;
389
390 wrmsrl(hwc->config_base, hwc->config);
391}
392
393void x86_pmu_enable_event(struct perf_event *event);
394
395int x86_pmu_handle_irq(struct pt_regs *regs);
396
397extern struct event_constraint emptyconstraint;
398
399extern struct event_constraint unconstrained;
400
401#ifdef CONFIG_CPU_SUP_AMD
402
403int amd_pmu_init(void);
404
405#else /* CONFIG_CPU_SUP_AMD */
406
407static inline int amd_pmu_init(void)
408{
409 return 0;
410}
411
412#endif /* CONFIG_CPU_SUP_AMD */
413
414#ifdef CONFIG_CPU_SUP_INTEL
415
416int intel_pmu_save_and_restart(struct perf_event *event);
417
418struct event_constraint *
419x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event);
420
421struct intel_shared_regs *allocate_shared_regs(int cpu);
422
423int intel_pmu_init(void);
424
425void init_debug_store_on_cpu(int cpu);
426
427void fini_debug_store_on_cpu(int cpu);
428
429void release_ds_buffers(void);
430
431void reserve_ds_buffers(void);
432
433extern struct event_constraint bts_constraint;
434
435void intel_pmu_enable_bts(u64 config);
436
437void intel_pmu_disable_bts(void);
438
439int intel_pmu_drain_bts_buffer(void);
440
441extern struct event_constraint intel_core2_pebs_event_constraints[];
442
443extern struct event_constraint intel_atom_pebs_event_constraints[];
444
445extern struct event_constraint intel_nehalem_pebs_event_constraints[];
446
447extern struct event_constraint intel_westmere_pebs_event_constraints[];
448
449extern struct event_constraint intel_snb_pebs_event_constraints[];
450
451struct event_constraint *intel_pebs_constraints(struct perf_event *event);
452
453void intel_pmu_pebs_enable(struct perf_event *event);
454
455void intel_pmu_pebs_disable(struct perf_event *event);
456
457void intel_pmu_pebs_enable_all(void);
458
459void intel_pmu_pebs_disable_all(void);
460
461void intel_ds_init(void);
462
463void intel_pmu_lbr_reset(void);
464
465void intel_pmu_lbr_enable(struct perf_event *event);
466
467void intel_pmu_lbr_disable(struct perf_event *event);
468
469void intel_pmu_lbr_enable_all(void);
470
471void intel_pmu_lbr_disable_all(void);
472
473void intel_pmu_lbr_read(void);
474
475void intel_pmu_lbr_init_core(void);
476
477void intel_pmu_lbr_init_nhm(void);
478
479void intel_pmu_lbr_init_atom(void);
480
481int p4_pmu_init(void);
482
483int p6_pmu_init(void);
484
485#else /* CONFIG_CPU_SUP_INTEL */
486
487static inline void reserve_ds_buffers(void)
488{
489}
490
491static inline void release_ds_buffers(void)
492{
493}
494
495static inline int intel_pmu_init(void)
496{
497 return 0;
498}
499
500static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
501{
502 return NULL;
503}
504
505#endif /* CONFIG_CPU_SUP_INTEL */
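
For readers skimming the constraint macros above, a compilable sketch of how such a table is consumed may help: walk entries until one whose cmask-selected config bits equal its code, which is the loop x86_get_event_constraints() runs. The types here are simplified (no idxmsk union, no weight field) and the table values are invented for illustration.

#include <stdio.h>
#include <stdint.h>

struct event_constraint {
	uint64_t idxmsk;	/* counters the event may use */
	uint64_t code;		/* event code to match */
	uint64_t cmask;		/* config bits that take part in the match */
};

#define EVENT_CONSTRAINT(c, n, m)	{ .idxmsk = (n), .code = (c), .cmask = (m) }
#define EVENT_CONSTRAINT_END		EVENT_CONSTRAINT(0, 0, 0)

static const struct event_constraint constraints[] = {
	EVENT_CONSTRAINT(0xc0, 0x3, 0xff),	/* event 0xc0: counters 0-1 only */
	EVENT_CONSTRAINT_END,
};

static uint64_t counters_for(uint64_t config)
{
	const struct event_constraint *c;

	for (c = constraints; c->cmask; c++)
		if ((config & c->cmask) == c->code)
			return c->idxmsk;
	return ~0ULL;	/* unconstrained: any counter */
}

int main(void)
{
	printf("0x%llx\n", (unsigned long long)counters_for(0xc0)); /* 0x3 */
	printf("0x%llx\n", (unsigned long long)counters_for(0x3c)); /* all */
	return 0;
}
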
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 941caa2e449b..aeefd45697a2 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -1,4 +1,10 @@
-#ifdef CONFIG_CPU_SUP_AMD
+#include <linux/perf_event.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <asm/apicdef.h>
+
+#include "perf_event.h"
 
 static __initconst const u64 amd_hw_cache_event_ids
 				[PERF_COUNT_HW_CACHE_MAX]
@@ -132,6 +138,19 @@ static int amd_pmu_hw_config(struct perf_event *event)
 	if (ret)
 		return ret;
 
+	if (event->attr.exclude_host && event->attr.exclude_guest)
+		/*
+		 * When HO == GO == 1 the hardware treats that as GO == HO == 0
+		 * and will count in both modes. We don't want to count in that
+		 * case so we emulate no-counting by setting US = OS = 0.
+		 */
+		event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
+				      ARCH_PERFMON_EVENTSEL_OS);
+	else if (event->attr.exclude_host)
+		event->hw.config |= AMD_PERFMON_EVENTSEL_GUESTONLY;
+	else if (event->attr.exclude_guest)
+		event->hw.config |= AMD_PERFMON_EVENTSEL_HOSTONLY;
+
 	if (event->attr.type != PERF_TYPE_RAW)
 		return 0;
 
@@ -350,7 +369,7 @@ static void amd_pmu_cpu_starting(int cpu)
 			continue;
 
 		if (nb->nb_id == nb_id) {
-			kfree(cpuc->amd_nb);
+			cpuc->kfree_on_online = cpuc->amd_nb;
 			cpuc->amd_nb = nb;
 			break;
 		}
@@ -392,7 +411,7 @@ static __initconst const struct x86_pmu amd_pmu = {
 	.perfctr		= MSR_K7_PERFCTR0,
 	.event_map		= amd_pmu_event_map,
 	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
-	.num_counters		= 4,
+	.num_counters		= AMD64_NUM_COUNTERS,
 	.cntval_bits		= 48,
 	.cntval_mask		= (1ULL << 48) - 1,
 	.apic			= 1,
@@ -556,7 +575,7 @@ static __initconst const struct x86_pmu amd_pmu_f15h = {
 	.perfctr		= MSR_F15H_PERF_CTR,
 	.event_map		= amd_pmu_event_map,
 	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
-	.num_counters		= 6,
+	.num_counters		= AMD64_NUM_COUNTERS_F15H,
 	.cntval_bits		= 48,
 	.cntval_mask		= (1ULL << 48) - 1,
 	.apic			= 1,
@@ -573,7 +592,7 @@ static __initconst const struct x86_pmu amd_pmu_f15h = {
 #endif
 };
 
-static __init int amd_pmu_init(void)
+__init int amd_pmu_init(void)
 {
 	/* Performance-monitoring supported from K7 and later: */
 	if (boot_cpu_data.x86 < 6)
@@ -602,12 +621,3 @@ static __init int amd_pmu_init(void)
 
 	return 0;
 }
-
-#else /* CONFIG_CPU_SUP_AMD */
-
-static int amd_pmu_init(void)
-{
-	return 0;
-}
-
-#endif
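
The HO/GO quirk handled in amd_pmu_hw_config() above is easy to get backwards, so here is a stand-alone illustration. The bit positions are the real event-select layout (USR bit 16, OS bit 17, GO bit 40, HO bit 41); the helper itself is only a sketch, not the kernel function.

#include <stdio.h>
#include <stdint.h>

#define EVENTSEL_USR		(1ULL << 16)
#define EVENTSEL_OS		(1ULL << 17)
#define EVENTSEL_GUESTONLY	(1ULL << 40)	/* GO */
#define EVENTSEL_HOSTONLY	(1ULL << 41)	/* HO */

static uint64_t apply_exclude(uint64_t config, int exclude_host, int exclude_guest)
{
	if (exclude_host && exclude_guest)
		/* HO == GO == 1 counts everywhere, so neuter USR/OS instead */
		config &= ~(EVENTSEL_USR | EVENTSEL_OS);
	else if (exclude_host)
		config |= EVENTSEL_GUESTONLY;
	else if (exclude_guest)
		config |= EVENTSEL_HOSTONLY;
	return config;
}

int main(void)
{
	uint64_t cfg = EVENTSEL_USR | EVENTSEL_OS;

	printf("guest-only: 0x%llx\n", (unsigned long long)apply_exclude(cfg, 1, 0));
	printf("host-only:  0x%llx\n", (unsigned long long)apply_exclude(cfg, 0, 1));
	printf("neither:    0x%llx\n", (unsigned long long)apply_exclude(cfg, 1, 1));
	return 0;
}
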
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
new file mode 100644
index 000000000000..ab6343d21825
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -0,0 +1,294 @@
1/*
2 * Performance events - AMD IBS
3 *
4 * Copyright (C) 2011 Advanced Micro Devices, Inc., Robert Richter
5 *
6 * For licencing details see kernel-base/COPYING
7 */
8
9#include <linux/perf_event.h>
10#include <linux/module.h>
11#include <linux/pci.h>
12
13#include <asm/apic.h>
14
15static u32 ibs_caps;
16
17#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
18
19static struct pmu perf_ibs;
20
21static int perf_ibs_init(struct perf_event *event)
22{
23 if (perf_ibs.type != event->attr.type)
24 return -ENOENT;
25 return 0;
26}
27
28static int perf_ibs_add(struct perf_event *event, int flags)
29{
30 return 0;
31}
32
33static void perf_ibs_del(struct perf_event *event, int flags)
34{
35}
36
37static struct pmu perf_ibs = {
38	.event_init	= perf_ibs_init,
39	.add		= perf_ibs_add,
40	.del		= perf_ibs_del,
41};
42
43static __init int perf_event_ibs_init(void)
44{
45 if (!ibs_caps)
46 return -ENODEV; /* ibs not supported by the cpu */
47
48 perf_pmu_register(&perf_ibs, "ibs", -1);
49 printk(KERN_INFO "perf: AMD IBS detected (0x%08x)\n", ibs_caps);
50
51 return 0;
52}
53
54#else /* defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) */
55
56static __init int perf_event_ibs_init(void) { return 0; }
57
58#endif
59
60/* IBS - apic initialization, for perf and oprofile */
61
62static __init u32 __get_ibs_caps(void)
63{
64 u32 caps;
65 unsigned int max_level;
66
67 if (!boot_cpu_has(X86_FEATURE_IBS))
68 return 0;
69
70 /* check IBS cpuid feature flags */
71 max_level = cpuid_eax(0x80000000);
72 if (max_level < IBS_CPUID_FEATURES)
73 return IBS_CAPS_DEFAULT;
74
75 caps = cpuid_eax(IBS_CPUID_FEATURES);
76 if (!(caps & IBS_CAPS_AVAIL))
77 /* cpuid flags not valid */
78 return IBS_CAPS_DEFAULT;
79
80 return caps;
81}
82
83u32 get_ibs_caps(void)
84{
85 return ibs_caps;
86}
87
88EXPORT_SYMBOL(get_ibs_caps);
89
90static inline int get_eilvt(int offset)
91{
92 return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1);
93}
94
95static inline int put_eilvt(int offset)
96{
97 return !setup_APIC_eilvt(offset, 0, 0, 1);
98}
99
100/*
101 * Check and reserve APIC extended interrupt LVT offset for IBS if available.
102 */
103static inline int ibs_eilvt_valid(void)
104{
105 int offset;
106 u64 val;
107 int valid = 0;
108
109 preempt_disable();
110
111 rdmsrl(MSR_AMD64_IBSCTL, val);
112 offset = val & IBSCTL_LVT_OFFSET_MASK;
113
114 if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
115 pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
116 smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
117 goto out;
118 }
119
120 if (!get_eilvt(offset)) {
121 pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
122 smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
123 goto out;
124 }
125
126 valid = 1;
127out:
128 preempt_enable();
129
130 return valid;
131}
132
133static int setup_ibs_ctl(int ibs_eilvt_off)
134{
135 struct pci_dev *cpu_cfg;
136 int nodes;
137 u32 value = 0;
138
139 nodes = 0;
140 cpu_cfg = NULL;
141 do {
142 cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
143 PCI_DEVICE_ID_AMD_10H_NB_MISC,
144 cpu_cfg);
145 if (!cpu_cfg)
146 break;
147 ++nodes;
148 pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
149 | IBSCTL_LVT_OFFSET_VALID);
150 pci_read_config_dword(cpu_cfg, IBSCTL, &value);
151 if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) {
152 pci_dev_put(cpu_cfg);
153 printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
154 "IBSCTL = 0x%08x\n", value);
155 return -EINVAL;
156 }
157 } while (1);
158
159 if (!nodes) {
160 printk(KERN_DEBUG "No CPU node configured for IBS\n");
161 return -ENODEV;
162 }
163
164 return 0;
165}
166
167/*
168 * This runs only on the current cpu. We try to find an LVT offset and
169 * set up the local APIC, which requires preemption to be disabled. On
170 * success we initialize all nodes with this offset, which in turn
171 * updates the offset in each node's IBS_CTL MSR. The per-core APIC
172 * setup of the IBS interrupt vector is handled by perf_ibs_cpu_notifier,
173 * which uses the new offset.
174 */
175static int force_ibs_eilvt_setup(void)
176{
177 int offset;
178 int ret;
179
180 preempt_disable();
181 /* find the next free available EILVT entry, skip offset 0 */
182 for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
183 if (get_eilvt(offset))
184 break;
185 }
186 preempt_enable();
187
188 if (offset == APIC_EILVT_NR_MAX) {
189 printk(KERN_DEBUG "No EILVT entry available\n");
190 return -EBUSY;
191 }
192
193 ret = setup_ibs_ctl(offset);
194 if (ret)
195 goto out;
196
197 if (!ibs_eilvt_valid()) {
198 ret = -EFAULT;
199 goto out;
200 }
201
202 pr_err(FW_BUG "using offset %d for IBS interrupts\n", offset);
203 pr_err(FW_BUG "workaround enabled for IBS LVT offset\n");
204
205 return 0;
206out:
207 preempt_disable();
208 put_eilvt(offset);
209 preempt_enable();
210 return ret;
211}
212
213static inline int get_ibs_lvt_offset(void)
214{
215 u64 val;
216
217 rdmsrl(MSR_AMD64_IBSCTL, val);
218 if (!(val & IBSCTL_LVT_OFFSET_VALID))
219 return -EINVAL;
220
221 return val & IBSCTL_LVT_OFFSET_MASK;
222}
223
224static void setup_APIC_ibs(void *dummy)
225{
226 int offset;
227
228 offset = get_ibs_lvt_offset();
229 if (offset < 0)
230 goto failed;
231
232 if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0))
233 return;
234failed:
235 pr_warn("perf: IBS APIC setup failed on cpu #%d\n",
236 smp_processor_id());
237}
238
239static void clear_APIC_ibs(void *dummy)
240{
241 int offset;
242
243 offset = get_ibs_lvt_offset();
244 if (offset >= 0)
245 setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
246}
247
248static int __cpuinit
249perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
250{
251 switch (action & ~CPU_TASKS_FROZEN) {
252 case CPU_STARTING:
253 setup_APIC_ibs(NULL);
254 break;
255 case CPU_DYING:
256 clear_APIC_ibs(NULL);
257 break;
258 default:
259 break;
260 }
261
262 return NOTIFY_OK;
263}
264
265static __init int amd_ibs_init(void)
266{
267 u32 caps;
268 int ret;
269
270 caps = __get_ibs_caps();
271 if (!caps)
272 return -ENODEV; /* ibs not supported by the cpu */
273
274 if (!ibs_eilvt_valid()) {
275 ret = force_ibs_eilvt_setup();
276 if (ret) {
277 pr_err("Failed to setup IBS, %d\n", ret);
278 return ret;
279 }
280 }
281
282 get_online_cpus();
283 ibs_caps = caps;
284 /* make ibs_caps visible to other cpus: */
285 smp_mb();
286 perf_cpu_notifier(perf_ibs_cpu_notifier);
287 smp_call_function(setup_APIC_ibs, NULL, 1);
288 put_online_cpus();
289
290 return perf_event_ibs_init();
291}
292
293/* Since we need the pci subsystem to init ibs we can't do this earlier: */
294device_initcall(amd_ibs_init);
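
The CPUID dance in __get_ibs_caps() can be reproduced from user space. The sketch below uses GCC's <cpuid.h> helper and mirrors the kernel's leaf and flag constants; it skips the X86_FEATURE_IBS pre-check and the IBS_CAPS_DEFAULT fallback, so treat it as a simplified probe, not a drop-in replacement.

#include <stdio.h>
#include <cpuid.h>

#define IBS_CPUID_FEATURES	0x8000001b
#define IBS_CAPS_AVAIL		(1U << 0)

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* the max extended leaf must reach the IBS feature leaf */
	if (!__get_cpuid(0x80000000, &eax, &ebx, &ecx, &edx) ||
	    eax < IBS_CPUID_FEATURES) {
		printf("IBS feature leaf not present\n");
		return 0;
	}

	__get_cpuid(IBS_CPUID_FEATURES, &eax, &ebx, &ecx, &edx);
	if (eax & IBS_CAPS_AVAIL)
		printf("IBS caps: 0x%08x\n", eax);
	else
		printf("IBS not available\n");
	return 0;
}
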
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index f88af2c2a561..e09ca20e86ee 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1,16 +1,19 @@
-#ifdef CONFIG_CPU_SUP_INTEL
-
 /*
  * Per core/cpu state
  *
  * Used to coordinate shared registers between HT threads or
  * among events on a single PMU.
  */
-struct intel_shared_regs {
-	struct er_account regs[EXTRA_REG_MAX];
-	int refcnt;		/* per-core: #HT threads */
-	unsigned core_id;	/* per-core: core id */
-};
+
+#include <linux/stddef.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+
+#include <asm/hardirq.h>
+#include <asm/apic.h>
+
+#include "perf_event.h"
 
 /*
  * Intel PerfMon, used on Core and later.
@@ -746,7 +749,8 @@ static void intel_pmu_enable_all(int added)
 
 	intel_pmu_pebs_enable_all();
 	intel_pmu_lbr_enable_all();
-	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
+	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
+			x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
 
 	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
 		struct perf_event *event =
@@ -869,6 +873,7 @@ static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
 static void intel_pmu_disable_event(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
 	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
 		intel_pmu_disable_bts();
@@ -876,6 +881,9 @@ static void intel_pmu_disable_event(struct perf_event *event)
 		return;
 	}
 
+	cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
+	cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
+
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
 		intel_pmu_disable_fixed(hwc);
 		return;
@@ -921,6 +929,7 @@ static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
 static void intel_pmu_enable_event(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
 	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
 		if (!__this_cpu_read(cpu_hw_events.enabled))
@@ -930,6 +939,11 @@ static void intel_pmu_enable_event(struct perf_event *event)
 		return;
 	}
 
+	if (event->attr.exclude_host)
+		cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
+	if (event->attr.exclude_guest)
+		cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);
+
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
 		intel_pmu_enable_fixed(hwc);
 		return;
@@ -945,7 +959,7 @@ static void intel_pmu_enable_event(struct perf_event *event)
  * Save and restart an expired event. Called by NMI contexts,
  * so it has to be careful about preempting normal event ops:
  */
-static int intel_pmu_save_and_restart(struct perf_event *event)
+int intel_pmu_save_and_restart(struct perf_event *event)
 {
 	x86_perf_event_update(event);
 	return x86_perf_event_set_period(event);
@@ -1197,6 +1211,21 @@ intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
 	return c;
 }
 
+struct event_constraint *
+x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
+{
+	struct event_constraint *c;
+
+	if (x86_pmu.event_constraints) {
+		for_each_event_constraint(c, x86_pmu.event_constraints) {
+			if ((event->hw.config & c->cmask) == c->code)
+				return c;
+		}
+	}
+
+	return &unconstrained;
+}
+
 static struct event_constraint *
 intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
 {
@@ -1284,12 +1313,84 @@ static int intel_pmu_hw_config(struct perf_event *event)
1284 return 0; 1313 return 0;
1285} 1314}
1286 1315
1316struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
1317{
1318 if (x86_pmu.guest_get_msrs)
1319 return x86_pmu.guest_get_msrs(nr);
1320 *nr = 0;
1321 return NULL;
1322}
1323EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
1324
1325static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
1326{
1327 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1328 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
1329
1330 arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
1331 arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
1332 arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
1333
1334 *nr = 1;
1335 return arr;
1336}
1337
1338static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
1339{
1340 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1341 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
1342 int idx;
1343
1344 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1345 struct perf_event *event = cpuc->events[idx];
1346
1347 arr[idx].msr = x86_pmu_config_addr(idx);
1348 arr[idx].host = arr[idx].guest = 0;
1349
1350 if (!test_bit(idx, cpuc->active_mask))
1351 continue;
1352
1353 arr[idx].host = arr[idx].guest =
1354 event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
1355
1356 if (event->attr.exclude_host)
1357 arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
1358 else if (event->attr.exclude_guest)
1359 arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
1360 }
1361
1362 *nr = x86_pmu.num_counters;
1363 return arr;
1364}
1365
1366static void core_pmu_enable_event(struct perf_event *event)
1367{
1368 if (!event->attr.exclude_host)
1369 x86_pmu_enable_event(event);
1370}
1371
1372static void core_pmu_enable_all(int added)
1373{
1374 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1375 int idx;
1376
1377 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1378 struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
1379
1380 if (!test_bit(idx, cpuc->active_mask) ||
1381 cpuc->events[idx]->attr.exclude_host)
1382 continue;
1383
1384 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
1385 }
1386}
1387
 static __initconst const struct x86_pmu core_pmu = {
 	.name			= "core",
 	.handle_irq		= x86_pmu_handle_irq,
 	.disable_all		= x86_pmu_disable_all,
-	.enable_all		= x86_pmu_enable_all,
-	.enable			= x86_pmu_enable_event,
+	.enable_all		= core_pmu_enable_all,
+	.enable			= core_pmu_enable_event,
 	.disable		= x86_pmu_disable_event,
 	.hw_config		= x86_pmu_hw_config,
 	.schedule_events	= x86_schedule_events,
@@ -1307,9 +1408,10 @@ static __initconst const struct x86_pmu core_pmu = {
 	.get_event_constraints	= intel_get_event_constraints,
 	.put_event_constraints	= intel_put_event_constraints,
 	.event_constraints	= intel_core_event_constraints,
+	.guest_get_msrs		= core_guest_get_msrs,
 };
 
-static struct intel_shared_regs *allocate_shared_regs(int cpu)
+struct intel_shared_regs *allocate_shared_regs(int cpu)
 {
 	struct intel_shared_regs *regs;
 	int i;
@@ -1362,7 +1464,7 @@ static void intel_pmu_cpu_starting(int cpu)
 
 		pc = per_cpu(cpu_hw_events, i).shared_regs;
 		if (pc && pc->core_id == core_id) {
-			kfree(cpuc->shared_regs);
+			cpuc->kfree_on_online = cpuc->shared_regs;
 			cpuc->shared_regs = pc;
 			break;
 		}
@@ -1413,6 +1515,7 @@ static __initconst const struct x86_pmu intel_pmu = {
 	.cpu_prepare		= intel_pmu_cpu_prepare,
 	.cpu_starting		= intel_pmu_cpu_starting,
 	.cpu_dying		= intel_pmu_cpu_dying,
+	.guest_get_msrs		= intel_guest_get_msrs,
 };
 
 static void intel_clovertown_quirks(void)
@@ -1441,7 +1544,7 @@ static void intel_clovertown_quirks(void)
 	x86_pmu.pebs_constraints = NULL;
 }
 
-static __init int intel_pmu_init(void)
+__init int intel_pmu_init(void)
 {
 	union cpuid10_edx edx;
 	union cpuid10_eax eax;
@@ -1597,7 +1700,7 @@ static __init int intel_pmu_init(void)
 		intel_pmu_lbr_init_nhm();
 
 		x86_pmu.event_constraints = intel_snb_event_constraints;
-		x86_pmu.pebs_constraints = intel_snb_pebs_events;
+		x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
 		x86_pmu.extra_regs = intel_snb_extra_regs;
 		/* all extra regs are per-cpu when HT is on */
 		x86_pmu.er_flags |= ERF_HAS_RSP_1;
@@ -1628,16 +1731,3 @@ static __init int intel_pmu_init(void)
 	}
 	return 0;
 }
-
-#else /* CONFIG_CPU_SUP_INTEL */
-
-static int intel_pmu_init(void)
-{
-	return 0;
-}
-
-static struct intel_shared_regs *allocate_shared_regs(int cpu)
-{
-	return NULL;
-}
-#endif /* CONFIG_CPU_SUP_INTEL */
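
The guest/host switch added above boils down to two derived values for MSR_CORE_PERF_GLOBAL_CTRL. A stand-alone model, with invented mask values, shows the arithmetic intel_guest_get_msrs() performs:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t intel_ctrl = 0xf;		/* four generic counters enabled */
	uint64_t guest_mask = 1ULL << 1;	/* counter 1 counts guest only */
	uint64_t host_mask  = 1ULL << 2;	/* counter 2 counts host only */

	/* the host runs with guest-only counters masked out, and vice versa */
	uint64_t host_ctrl  = intel_ctrl & ~guest_mask;	/* 0xd */
	uint64_t guest_ctrl = intel_ctrl & ~host_mask;	/* 0xb */

	printf("host GLOBAL_CTRL:  0x%llx\n", (unsigned long long)host_ctrl);
	printf("guest GLOBAL_CTRL: 0x%llx\n", (unsigned long long)guest_ctrl);
	return 0;
}
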
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 1b1ef3addcfd..c0d238f49db8 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -1,7 +1,10 @@
-#ifdef CONFIG_CPU_SUP_INTEL
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <linux/slab.h>
 
-/* The maximal number of PEBS events: */
-#define MAX_PEBS_EVENTS		4
+#include <asm/perf_event.h>
+
+#include "perf_event.h"
 
 /* The size of a BTS record in bytes: */
 #define BTS_RECORD_SIZE		24
@@ -37,24 +40,7 @@ struct pebs_record_nhm {
 	u64 status, dla, dse, lat;
 };
 
-/*
- * A debug store configuration.
- *
- * We only support architectures that use 64bit fields.
- */
-struct debug_store {
-	u64	bts_buffer_base;
-	u64	bts_index;
-	u64	bts_absolute_maximum;
-	u64	bts_interrupt_threshold;
-	u64	pebs_buffer_base;
-	u64	pebs_index;
-	u64	pebs_absolute_maximum;
-	u64	pebs_interrupt_threshold;
-	u64	pebs_event_reset[MAX_PEBS_EVENTS];
-};
-
-static void init_debug_store_on_cpu(int cpu)
+void init_debug_store_on_cpu(int cpu)
 {
 	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
 
@@ -66,7 +52,7 @@ static void init_debug_store_on_cpu(int cpu)
66 (u32)((u64)(unsigned long)ds >> 32)); 52 (u32)((u64)(unsigned long)ds >> 32));
67} 53}
68 54
69static void fini_debug_store_on_cpu(int cpu) 55void fini_debug_store_on_cpu(int cpu)
70{ 56{
71 if (!per_cpu(cpu_hw_events, cpu).ds) 57 if (!per_cpu(cpu_hw_events, cpu).ds)
72 return; 58 return;
@@ -175,7 +161,7 @@ static void release_ds_buffer(int cpu)
175 kfree(ds); 161 kfree(ds);
176} 162}
177 163
178static void release_ds_buffers(void) 164void release_ds_buffers(void)
179{ 165{
180 int cpu; 166 int cpu;
181 167
@@ -194,7 +180,7 @@ static void release_ds_buffers(void)
194 put_online_cpus(); 180 put_online_cpus();
195} 181}
196 182
197static void reserve_ds_buffers(void) 183void reserve_ds_buffers(void)
198{ 184{
199 int bts_err = 0, pebs_err = 0; 185 int bts_err = 0, pebs_err = 0;
200 int cpu; 186 int cpu;
@@ -260,10 +246,10 @@ static void reserve_ds_buffers(void)
260 * BTS 246 * BTS
261 */ 247 */
262 248
263static struct event_constraint bts_constraint = 249struct event_constraint bts_constraint =
264 EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0); 250 EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
265 251
266static void intel_pmu_enable_bts(u64 config) 252void intel_pmu_enable_bts(u64 config)
267{ 253{
268 unsigned long debugctlmsr; 254 unsigned long debugctlmsr;
269 255
@@ -282,7 +268,7 @@ static void intel_pmu_enable_bts(u64 config)
282 update_debugctlmsr(debugctlmsr); 268 update_debugctlmsr(debugctlmsr);
283} 269}
284 270
285static void intel_pmu_disable_bts(void) 271void intel_pmu_disable_bts(void)
286{ 272{
287 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 273 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
288 unsigned long debugctlmsr; 274 unsigned long debugctlmsr;
@@ -299,7 +285,7 @@ static void intel_pmu_disable_bts(void)
299 update_debugctlmsr(debugctlmsr); 285 update_debugctlmsr(debugctlmsr);
300} 286}
301 287
302static int intel_pmu_drain_bts_buffer(void) 288int intel_pmu_drain_bts_buffer(void)
303{ 289{
304 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 290 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
305 struct debug_store *ds = cpuc->ds; 291 struct debug_store *ds = cpuc->ds;
@@ -361,7 +347,7 @@ static int intel_pmu_drain_bts_buffer(void)
361/* 347/*
362 * PEBS 348 * PEBS
363 */ 349 */
364static struct event_constraint intel_core2_pebs_event_constraints[] = { 350struct event_constraint intel_core2_pebs_event_constraints[] = {
365 INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */ 351 INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
366 INTEL_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */ 352 INTEL_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
367 INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */ 353 INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
@@ -370,14 +356,14 @@ static struct event_constraint intel_core2_pebs_event_constraints[] = {
370 EVENT_CONSTRAINT_END 356 EVENT_CONSTRAINT_END
371}; 357};
372 358
373static struct event_constraint intel_atom_pebs_event_constraints[] = { 359struct event_constraint intel_atom_pebs_event_constraints[] = {
374 INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */ 360 INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
375 INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */ 361 INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
376 INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */ 362 INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
377 EVENT_CONSTRAINT_END 363 EVENT_CONSTRAINT_END
378}; 364};
379 365
380static struct event_constraint intel_nehalem_pebs_event_constraints[] = { 366struct event_constraint intel_nehalem_pebs_event_constraints[] = {
381 INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */ 367 INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */
382 INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */ 368 INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
383 INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */ 369 INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
@@ -392,7 +378,7 @@ static struct event_constraint intel_nehalem_pebs_event_constraints[] = {
392 EVENT_CONSTRAINT_END 378 EVENT_CONSTRAINT_END
393}; 379};
394 380
395static struct event_constraint intel_westmere_pebs_event_constraints[] = { 381struct event_constraint intel_westmere_pebs_event_constraints[] = {
396 INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */ 382 INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */
397 INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */ 383 INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
398 INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */ 384 INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
@@ -407,7 +393,7 @@ static struct event_constraint intel_westmere_pebs_event_constraints[] = {
407 EVENT_CONSTRAINT_END 393 EVENT_CONSTRAINT_END
408}; 394};
409 395
-static struct event_constraint intel_snb_pebs_events[] = {
+struct event_constraint intel_snb_pebs_event_constraints[] = {
 	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
 	INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
 	INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
@@ -428,8 +414,7 @@ static struct event_constraint intel_snb_pebs_events[] = {
428 EVENT_CONSTRAINT_END 414 EVENT_CONSTRAINT_END
429}; 415};
430 416
431static struct event_constraint * 417struct event_constraint *intel_pebs_constraints(struct perf_event *event)
432intel_pebs_constraints(struct perf_event *event)
433{ 418{
434 struct event_constraint *c; 419 struct event_constraint *c;
435 420
@@ -446,7 +431,7 @@ intel_pebs_constraints(struct perf_event *event)
446 return &emptyconstraint; 431 return &emptyconstraint;
447} 432}
448 433
449static void intel_pmu_pebs_enable(struct perf_event *event) 434void intel_pmu_pebs_enable(struct perf_event *event)
450{ 435{
451 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 436 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
452 struct hw_perf_event *hwc = &event->hw; 437 struct hw_perf_event *hwc = &event->hw;
@@ -460,7 +445,7 @@ static void intel_pmu_pebs_enable(struct perf_event *event)
460 intel_pmu_lbr_enable(event); 445 intel_pmu_lbr_enable(event);
461} 446}
462 447
463static void intel_pmu_pebs_disable(struct perf_event *event) 448void intel_pmu_pebs_disable(struct perf_event *event)
464{ 449{
465 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 450 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
466 struct hw_perf_event *hwc = &event->hw; 451 struct hw_perf_event *hwc = &event->hw;
@@ -475,7 +460,7 @@ static void intel_pmu_pebs_disable(struct perf_event *event)
475 intel_pmu_lbr_disable(event); 460 intel_pmu_lbr_disable(event);
476} 461}
477 462
478static void intel_pmu_pebs_enable_all(void) 463void intel_pmu_pebs_enable_all(void)
479{ 464{
480 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 465 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
481 466
@@ -483,7 +468,7 @@ static void intel_pmu_pebs_enable_all(void)
483 wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); 468 wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
484} 469}
485 470
486static void intel_pmu_pebs_disable_all(void) 471void intel_pmu_pebs_disable_all(void)
487{ 472{
488 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 473 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
489 474
@@ -576,8 +561,6 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
576 return 0; 561 return 0;
577} 562}
578 563
579static int intel_pmu_save_and_restart(struct perf_event *event);
580
581static void __intel_pmu_pebs_event(struct perf_event *event, 564static void __intel_pmu_pebs_event(struct perf_event *event,
582 struct pt_regs *iregs, void *__pebs) 565 struct pt_regs *iregs, void *__pebs)
583{ 566{
@@ -716,7 +699,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
716 * BTS, PEBS probe and setup 699 * BTS, PEBS probe and setup
717 */ 700 */
718 701
719static void intel_ds_init(void) 702void intel_ds_init(void)
720{ 703{
721 /* 704 /*
722 * No support for 32bit formats 705 * No support for 32bit formats
@@ -749,15 +732,3 @@ static void intel_ds_init(void)
749 } 732 }
750 } 733 }
751} 734}
752
753#else /* CONFIG_CPU_SUP_INTEL */
754
755static void reserve_ds_buffers(void)
756{
757}
758
759static void release_ds_buffers(void)
760{
761}
762
763#endif /* CONFIG_CPU_SUP_INTEL */
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index d202c1bece1a..3fab3de3ce96 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -1,4 +1,10 @@
-#ifdef CONFIG_CPU_SUP_INTEL
+#include <linux/perf_event.h>
+#include <linux/types.h>
+
+#include <asm/perf_event.h>
+#include <asm/msr.h>
+
+#include "perf_event.h"
 
 enum {
 	LBR_FORMAT_32		= 0x00,
@@ -48,7 +54,7 @@ static void intel_pmu_lbr_reset_64(void)
48 } 54 }
49} 55}
50 56
51static void intel_pmu_lbr_reset(void) 57void intel_pmu_lbr_reset(void)
52{ 58{
53 if (!x86_pmu.lbr_nr) 59 if (!x86_pmu.lbr_nr)
54 return; 60 return;
@@ -59,7 +65,7 @@ static void intel_pmu_lbr_reset(void)
59 intel_pmu_lbr_reset_64(); 65 intel_pmu_lbr_reset_64();
60} 66}
61 67
62static void intel_pmu_lbr_enable(struct perf_event *event) 68void intel_pmu_lbr_enable(struct perf_event *event)
63{ 69{
64 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 70 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
65 71
@@ -81,7 +87,7 @@ static void intel_pmu_lbr_enable(struct perf_event *event)
81 cpuc->lbr_users++; 87 cpuc->lbr_users++;
82} 88}
83 89
84static void intel_pmu_lbr_disable(struct perf_event *event) 90void intel_pmu_lbr_disable(struct perf_event *event)
85{ 91{
86 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 92 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
87 93
@@ -95,7 +101,7 @@ static void intel_pmu_lbr_disable(struct perf_event *event)
95 __intel_pmu_lbr_disable(); 101 __intel_pmu_lbr_disable();
96} 102}
97 103
98static void intel_pmu_lbr_enable_all(void) 104void intel_pmu_lbr_enable_all(void)
99{ 105{
100 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 106 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
101 107
@@ -103,7 +109,7 @@ static void intel_pmu_lbr_enable_all(void)
103 __intel_pmu_lbr_enable(); 109 __intel_pmu_lbr_enable();
104} 110}
105 111
106static void intel_pmu_lbr_disable_all(void) 112void intel_pmu_lbr_disable_all(void)
107{ 113{
108 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 114 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
109 115
@@ -178,7 +184,7 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
178 cpuc->lbr_stack.nr = i; 184 cpuc->lbr_stack.nr = i;
179} 185}
180 186
181static void intel_pmu_lbr_read(void) 187void intel_pmu_lbr_read(void)
182{ 188{
183 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 189 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
184 190
@@ -191,7 +197,7 @@ static void intel_pmu_lbr_read(void)
191 intel_pmu_lbr_read_64(cpuc); 197 intel_pmu_lbr_read_64(cpuc);
192} 198}
193 199
194static void intel_pmu_lbr_init_core(void) 200void intel_pmu_lbr_init_core(void)
195{ 201{
196 x86_pmu.lbr_nr = 4; 202 x86_pmu.lbr_nr = 4;
197 x86_pmu.lbr_tos = 0x01c9; 203 x86_pmu.lbr_tos = 0x01c9;
@@ -199,7 +205,7 @@ static void intel_pmu_lbr_init_core(void)
199 x86_pmu.lbr_to = 0x60; 205 x86_pmu.lbr_to = 0x60;
200} 206}
201 207
202static void intel_pmu_lbr_init_nhm(void) 208void intel_pmu_lbr_init_nhm(void)
203{ 209{
204 x86_pmu.lbr_nr = 16; 210 x86_pmu.lbr_nr = 16;
205 x86_pmu.lbr_tos = 0x01c9; 211 x86_pmu.lbr_tos = 0x01c9;
@@ -207,12 +213,10 @@ static void intel_pmu_lbr_init_nhm(void)
207 x86_pmu.lbr_to = 0x6c0; 213 x86_pmu.lbr_to = 0x6c0;
208} 214}
209 215
-static void intel_pmu_lbr_init_atom(void)
+void intel_pmu_lbr_init_atom(void)
 {
 	x86_pmu.lbr_nr	   = 8;
 	x86_pmu.lbr_tos    = 0x01c9;
 	x86_pmu.lbr_from   = 0x40;
 	x86_pmu.lbr_to     = 0x60;
 }
-
-#endif /* CONFIG_CPU_SUP_INTEL */
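
The lbr_tos/lbr_from/lbr_to trio configured above drives a circular-buffer walk at read time. Here is a user-space model of intel_pmu_lbr_read_64()'s indexing, with arrays standing in for the MSRs and made-up addresses:

#include <stdio.h>
#include <stdint.h>

#define LBR_NR 4	/* Core-family stack depth, per intel_pmu_lbr_init_core() */

static uint64_t lbr_from[LBR_NR] = { 0x100, 0x200, 0x300, 0x400 };
static uint64_t lbr_to[LBR_NR]   = { 0x110, 0x210, 0x310, 0x410 };

int main(void)
{
	unsigned int tos = 2;	/* would come from the top-of-stack MSR */
	unsigned int i;

	/* newest branch first, walking backwards modulo the stack depth */
	for (i = 0; i < LBR_NR; i++) {
		unsigned int idx = (tos - i) & (LBR_NR - 1);

		printf("branch %u: %#llx -> %#llx\n", i,
		       (unsigned long long)lbr_from[idx],
		       (unsigned long long)lbr_to[idx]);
	}
	return 0;
}
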
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 7809d2bcb209..492bf1358a7c 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -7,9 +7,13 @@
  * For licencing details see kernel-base/COPYING
  */
 
-#ifdef CONFIG_CPU_SUP_INTEL
+#include <linux/perf_event.h>
 
 #include <asm/perf_event_p4.h>
+#include <asm/hardirq.h>
+#include <asm/apic.h>
+
+#include "perf_event.h"
 
 #define P4_CNTR_LIMIT 3
 /*
@@ -1303,7 +1307,7 @@ static __initconst const struct x86_pmu p4_pmu = {
 	.perfctr_second_write	= 1,
 };
 
-static __init int p4_pmu_init(void)
+__init int p4_pmu_init(void)
 {
 	unsigned int low, high;
 
@@ -1326,5 +1330,3 @@ static __init int p4_pmu_init(void)
 
 	return 0;
 }
-
-#endif /* CONFIG_CPU_SUP_INTEL */
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index 20c097e33860..c7181befecde 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -1,4 +1,7 @@
-#ifdef CONFIG_CPU_SUP_INTEL
+#include <linux/perf_event.h>
+#include <linux/types.h>
+
+#include "perf_event.h"
 
3/* 6/*
4 * Not sure about some of these 7 * Not sure about some of these
@@ -114,7 +117,7 @@ static __initconst const struct x86_pmu p6_pmu = {
114 .event_constraints = p6_event_constraints, 117 .event_constraints = p6_event_constraints,
115}; 118};
116 119
117static __init int p6_pmu_init(void) 120__init int p6_pmu_init(void)
118{ 121{
119 switch (boot_cpu_data.x86_model) { 122 switch (boot_cpu_data.x86_model) {
120 case 1: 123 case 1:
@@ -138,5 +141,3 @@ static __init int p6_pmu_init(void)
138 141
139 return 0; 142 return 0;
140} 143}
141
142#endif /* CONFIG_CPU_SUP_INTEL */
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 764c7c2b1811..13ad89971d47 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -32,15 +32,12 @@ int in_crash_kexec;
 
 #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
 
-static void kdump_nmi_callback(int cpu, struct die_args *args)
+static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
 {
-	struct pt_regs *regs;
 #ifdef CONFIG_X86_32
 	struct pt_regs fixed_regs;
 #endif
 
-	regs = args->regs;
-
 #ifdef CONFIG_X86_32
 	if (!user_mode_vm(regs)) {
 		crash_fixup_ss_esp(&fixed_regs, regs);
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 6419bb05ecd5..faf8d5e74b0b 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -331,10 +331,15 @@ ENDPROC(native_usergs_sysret64)
 1:	incl PER_CPU_VAR(irq_count)
 	jne 2f
 	mov PER_CPU_VAR(irq_stack_ptr),%rsp
-	EMPTY_FRAME 0
+	CFI_DEF_CFA_REGISTER	rsi
 
 2:	/* Store previous stack value */
 	pushq %rsi
+	CFI_ESCAPE	0x0f /* DW_CFA_def_cfa_expression */, 6, \
+			0x77 /* DW_OP_breg7 */, 0, \
+			0x06 /* DW_OP_deref */, \
+			0x08 /* DW_OP_const1u */, SS+8-RBP, \
+			0x22 /* DW_OP_plus */
 	/* We entered an interrupt context - irqs are off: */
 	TRACE_IRQS_OFF
 	.endm
@@ -788,7 +793,6 @@ END(interrupt)
 	subq $ORIG_RAX-RBP, %rsp
 	CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
 	SAVE_ARGS_IRQ
-	PARTIAL_FRAME 0
 	call \func
 	.endm
 
@@ -813,10 +817,10 @@ ret_from_intr:
 
 	/* Restore saved previous stack */
 	popq %rsi
-	leaq 16(%rsi), %rsp
-
+	CFI_DEF_CFA_REGISTER	rsi
+	leaq ARGOFFSET-RBP(%rsi), %rsp
 	CFI_DEF_CFA_REGISTER	rsp
-	CFI_ADJUST_CFA_OFFSET	-16
+	CFI_ADJUST_CFA_OFFSET	RBP-ARGOFFSET
 
 exit_intr:
 	GET_THREAD_INFO(%rcx)
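
The DW_CFA_def_cfa_expression emitted above is dense, so here is a compilable model of what an unwinder computes from it: dereference the saved stack pointer sitting at *rsp, then add the fixed frame offset. The SS/RBP offsets are assumptions standing in for that kernel's pt_regs layout, not values taken from this patch.

#include <stdio.h>
#include <stdint.h>

#define SS_OFF	160	/* assumed pt_regs offset of SS */
#define RBP_OFF	32	/* assumed pt_regs offset of RBP */

int main(void)
{
	uint64_t old_stack = 0x7ffffffde000ULL;	/* value saved by pushq %rsi */
	uint64_t stack[1] = { old_stack };	/* rsp points here after the push */
	uintptr_t rsp = (uintptr_t)&stack[0];

	/* DW_OP_breg7 0; DW_OP_deref; DW_OP_const1u SS+8-RBP; DW_OP_plus */
	uint64_t cfa = *(const uint64_t *)rsp + (SS_OFF + 8 - RBP_OFF);

	printf("CFA = %#llx\n", (unsigned long long)cfa);
	return 0;
}
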
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index 3fee346ef545..cacdd46d184d 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -42,7 +42,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
 	put_online_cpus();
 }
 
-void arch_jump_label_text_poke_early(jump_label_t addr)
+void __init_or_module arch_jump_label_text_poke_early(jump_label_t addr)
 {
 	text_poke_early((void *)addr, ideal_nops[NOP_ATOMIC5],
 			JUMP_LABEL_NOP_SIZE);
48 JUMP_LABEL_NOP_SIZE); 48 JUMP_LABEL_NOP_SIZE);
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 00354d4919a9..faba5771acad 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -511,28 +511,37 @@ single_step_cont(struct pt_regs *regs, struct die_args *args)
 
 static int was_in_debug_nmi[NR_CPUS];
 
-static int __kgdb_notify(struct die_args *args, unsigned long cmd)
+static int kgdb_nmi_handler(unsigned int cmd, struct pt_regs *regs)
 {
-	struct pt_regs *regs = args->regs;
-
 	switch (cmd) {
-	case DIE_NMI:
+	case NMI_LOCAL:
 		if (atomic_read(&kgdb_active) != -1) {
 			/* KGDB CPU roundup */
 			kgdb_nmicallback(raw_smp_processor_id(), regs);
 			was_in_debug_nmi[raw_smp_processor_id()] = 1;
 			touch_nmi_watchdog();
-			return NOTIFY_STOP;
+			return NMI_HANDLED;
 		}
-		return NOTIFY_DONE;
+		break;
 
-	case DIE_NMIUNKNOWN:
+	case NMI_UNKNOWN:
 		if (was_in_debug_nmi[raw_smp_processor_id()]) {
 			was_in_debug_nmi[raw_smp_processor_id()] = 0;
-			return NOTIFY_STOP;
+			return NMI_HANDLED;
 		}
-		return NOTIFY_DONE;
+		break;
+	default:
+		/* do nothing */
+		break;
+	}
+	return NMI_DONE;
+}
+
+static int __kgdb_notify(struct die_args *args, unsigned long cmd)
+{
+	struct pt_regs *regs = args->regs;
 
+	switch (cmd) {
 	case DIE_DEBUG:
 		if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
 			if (user_mode(regs))
 
 static struct notifier_block kgdb_notifier = {
 	.notifier_call	= kgdb_notify,
-
-	/*
-	 * Lowest-prio notifier priority, we want to be notified last:
-	 */
-	.priority	= NMI_LOCAL_LOW_PRIOR,
 };
 
600/** 604/**
@@ -605,7 +609,31 @@ static struct notifier_block kgdb_notifier = {
605 */ 609 */
606int kgdb_arch_init(void) 610int kgdb_arch_init(void)
607{ 611{
608 return register_die_notifier(&kgdb_notifier); 612 int retval;
613
614 retval = register_die_notifier(&kgdb_notifier);
615 if (retval)
616 goto out;
617
618 retval = register_nmi_handler(NMI_LOCAL, kgdb_nmi_handler,
619 0, "kgdb");
620 if (retval)
621 goto out1;
622
623 retval = register_nmi_handler(NMI_UNKNOWN, kgdb_nmi_handler,
624 0, "kgdb");
625
626 if (retval)
627 goto out2;
628
629 return retval;
630
631out2:
632 unregister_nmi_handler(NMI_LOCAL, "kgdb");
633out1:
634 unregister_die_notifier(&kgdb_notifier);
635out:
636 return retval;
609} 637}
610 638
611static void kgdb_hw_overflow_handler(struct perf_event *event, 639static void kgdb_hw_overflow_handler(struct perf_event *event,
@@ -673,6 +701,8 @@ void kgdb_arch_exit(void)
673 breakinfo[i].pev = NULL; 701 breakinfo[i].pev = NULL;
674 } 702 }
675 } 703 }
704 unregister_nmi_handler(NMI_UNKNOWN, "kgdb");
705 unregister_nmi_handler(NMI_LOCAL, "kgdb");
676 unregister_die_notifier(&kgdb_notifier); 706 unregister_die_notifier(&kgdb_notifier);
677} 707}
678 708
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index 794bc95134cd..7da647d8b64c 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -75,10 +75,11 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
75 /* 75 /*
76 * Undefined/reserved opcodes, conditional jump, Opcode Extension 76 * Undefined/reserved opcodes, conditional jump, Opcode Extension
77 * Groups, and some special opcodes can not boost. 77 * Groups, and some special opcodes can not boost.
78 * This is non-const to keep gcc from statically optimizing it out, as 78 * This is non-const and volatile to keep gcc from statically
79 * variable_test_bit makes gcc think only *(unsigned long*) is used. 79 * optimizing it out, as variable_test_bit makes gcc think only
80 * *(unsigned long*) is used.
80 */ 81 */
81static u32 twobyte_is_boostable[256 / 32] = { 82static volatile u32 twobyte_is_boostable[256 / 32] = {
82 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ 83 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
83 /* ---------------------------------------------- */ 84 /* ---------------------------------------------- */
84 W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */ 85 W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
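The `volatile` in the hunk above matters because the table is only ever read through a type-punned cast the compiler cannot see into, so a non-volatile table could be folded away as unused. A minimal userspace sketch of the same idea, with illustrative names (this is not the kernel code):

/* sketch: bit-packed lookup table kept alive by volatile */
#include <stdio.h>

static volatile unsigned int boostable[256 / 32] = {
	[0] = 0x0000000cU,	/* pretend opcodes 0x02 and 0x03 are "boostable" */
};

static int test_opcode(unsigned int opcode)
{
	/* pull one bit out of the packed table */
	return (boostable[opcode / 32] >> (opcode % 32)) & 1;
}

int main(void)
{
	printf("0x02 boostable: %d\n", test_opcode(0x02));
	return 0;
}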
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
new file mode 100644
index 000000000000..7ec5bd140b87
--- /dev/null
+++ b/arch/x86/kernel/nmi.c
@@ -0,0 +1,433 @@
1/*
2 * Copyright (C) 1991, 1992 Linus Torvalds
3 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
4 * Copyright (C) 2011 Don Zickus Red Hat, Inc.
5 *
6 * Pentium III FXSR, SSE support
7 * Gareth Hughes <gareth@valinux.com>, May 2000
8 */
9
10/*
11 * Handle hardware traps and faults.
12 */
13#include <linux/spinlock.h>
14#include <linux/kprobes.h>
15#include <linux/kdebug.h>
16#include <linux/nmi.h>
17#include <linux/delay.h>
18#include <linux/hardirq.h>
19#include <linux/slab.h>
20
21#include <linux/mca.h>
22
23#if defined(CONFIG_EDAC)
24#include <linux/edac.h>
25#endif
26
27#include <linux/atomic.h>
28#include <asm/traps.h>
29#include <asm/mach_traps.h>
30#include <asm/nmi.h>
31
32#define NMI_MAX_NAMELEN 16
33struct nmiaction {
34 struct list_head list;
35 nmi_handler_t handler;
36 unsigned int flags;
37 char *name;
38};
39
40struct nmi_desc {
41 spinlock_t lock;
42 struct list_head head;
43};
44
45static struct nmi_desc nmi_desc[NMI_MAX] =
46{
47 {
48 .lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[0].lock),
49 .head = LIST_HEAD_INIT(nmi_desc[0].head),
50 },
51 {
52 .lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[1].lock),
53 .head = LIST_HEAD_INIT(nmi_desc[1].head),
54 },
55
56};
57
58struct nmi_stats {
59 unsigned int normal;
60 unsigned int unknown;
61 unsigned int external;
62 unsigned int swallow;
63};
64
65static DEFINE_PER_CPU(struct nmi_stats, nmi_stats);
66
67static int ignore_nmis;
68
69int unknown_nmi_panic;
70/*
71 * Prevent the NMI reason port (0x61) from being accessed
72 * simultaneously; this lock may only be taken in NMI context.
73 */
74static DEFINE_RAW_SPINLOCK(nmi_reason_lock);
75
76static int __init setup_unknown_nmi_panic(char *str)
77{
78 unknown_nmi_panic = 1;
79 return 1;
80}
81__setup("unknown_nmi_panic", setup_unknown_nmi_panic);
82
83#define nmi_to_desc(type) (&nmi_desc[type])
84
85static int notrace __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
86{
87 struct nmi_desc *desc = nmi_to_desc(type);
88 struct nmiaction *a;
89 int handled = 0;
90
91 rcu_read_lock();
92
93 /*
94 * NMIs are edge-triggered, which means if you have enough
95 * of them concurrently, you can lose some because only one
96 * can be latched at any given time. Walk the whole list
97 * to handle those situations.
98 */
99 list_for_each_entry_rcu(a, &desc->head, list)
100 handled += a->handler(type, regs);
101
102 rcu_read_unlock();
103
104 /* return total number of NMI events handled */
105 return handled;
106}
107
108static int __setup_nmi(unsigned int type, struct nmiaction *action)
109{
110 struct nmi_desc *desc = nmi_to_desc(type);
111 unsigned long flags;
112
113 spin_lock_irqsave(&desc->lock, flags);
114
115 /*
116 * Most handlers of type NMI_UNKNOWN never return NMI_DONE; they
117 * simply assume the NMI is theirs. This is just a sanity check
118 * to manage expectations.
119 */
120 WARN_ON_ONCE(type == NMI_UNKNOWN && !list_empty(&desc->head));
121
122 /*
123 * Some handlers need to run first; otherwise a fake event
124 * confuses later handlers (kdump uses this flag).
125 */
126 if (action->flags & NMI_FLAG_FIRST)
127 list_add_rcu(&action->list, &desc->head);
128 else
129 list_add_tail_rcu(&action->list, &desc->head);
130
131 spin_unlock_irqrestore(&desc->lock, flags);
132 return 0;
133}
134
135static struct nmiaction *__free_nmi(unsigned int type, const char *name)
136{
137 struct nmi_desc *desc = nmi_to_desc(type);
138 struct nmiaction *n;
139 unsigned long flags;
140
141 spin_lock_irqsave(&desc->lock, flags);
142
143 list_for_each_entry_rcu(n, &desc->head, list) {
144 /*
145 * The name passed in to describe the NMI handler
146 * is used as the lookup key.
147 */
148 if (!strcmp(n->name, name)) {
149 WARN(in_nmi(),
150 "Trying to free NMI (%s) from NMI context!\n", n->name);
151 list_del_rcu(&n->list);
152 break;
153 }
154 }
155
156 spin_unlock_irqrestore(&desc->lock, flags);
157 synchronize_rcu();
158 return n;
159}
160
161int register_nmi_handler(unsigned int type, nmi_handler_t handler,
162 unsigned long nmiflags, const char *devname)
163{
164 struct nmiaction *action;
165 int retval = -ENOMEM;
166
167 if (!handler)
168 return -EINVAL;
169
170 action = kzalloc(sizeof(struct nmiaction), GFP_KERNEL);
171 if (!action)
172 goto fail_action;
173
174 action->handler = handler;
175 action->flags = nmiflags;
176 action->name = kstrndup(devname, NMI_MAX_NAMELEN, GFP_KERNEL);
177 if (!action->name)
178 goto fail_action_name;
179
180 retval = __setup_nmi(type, action);
181
182 if (retval)
183 goto fail_setup_nmi;
184
185 return retval;
186
187fail_setup_nmi:
188 kfree(action->name);
189fail_action_name:
190 kfree(action);
191fail_action:
192
193 return retval;
194}
195EXPORT_SYMBOL_GPL(register_nmi_handler);
196
197void unregister_nmi_handler(unsigned int type, const char *name)
198{
199 struct nmiaction *a;
200
201 a = __free_nmi(type, name);
202 if (a) {
203 kfree(a->name);
204 kfree(a);
205 }
206}
207
208EXPORT_SYMBOL_GPL(unregister_nmi_handler);
209
210static notrace __kprobes void
211pci_serr_error(unsigned char reason, struct pt_regs *regs)
212{
213 pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
214 reason, smp_processor_id());
215
216 /*
217 * On some machines, PCI SERR line is used to report memory
218 * errors. EDAC makes use of it.
219 */
220#if defined(CONFIG_EDAC)
221 if (edac_handler_set()) {
222 edac_atomic_assert_error();
223 return;
224 }
225#endif
226
227 if (panic_on_unrecovered_nmi)
228 panic("NMI: Not continuing");
229
230 pr_emerg("Dazed and confused, but trying to continue\n");
231
232 /* Clear and disable the PCI SERR error line. */
233 reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
234 outb(reason, NMI_REASON_PORT);
235}
236
237static notrace __kprobes void
238io_check_error(unsigned char reason, struct pt_regs *regs)
239{
240 unsigned long i;
241
242 pr_emerg(
243 "NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
244 reason, smp_processor_id());
245 show_registers(regs);
246
247 if (panic_on_io_nmi)
248 panic("NMI IOCK error: Not continuing");
249
250 /* Re-enable the IOCK line, wait for a few seconds */
251 reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
252 outb(reason, NMI_REASON_PORT);
253
254 i = 20000;
255 while (--i) {
256 touch_nmi_watchdog();
257 udelay(100);
258 }
259
260 reason &= ~NMI_REASON_CLEAR_IOCHK;
261 outb(reason, NMI_REASON_PORT);
262}
263
264static notrace __kprobes void
265unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
266{
267 int handled;
268
269 /*
270 * Use 'false' as back-to-back NMIs are dealt with one level up.
271 * Of course this makes having multiple 'unknown' handlers useless
272 * as only the first one is ever run (unless it can actually determine
273 * if it caused the NMI).
274 */
275 handled = nmi_handle(NMI_UNKNOWN, regs, false);
276 if (handled) {
277 __this_cpu_add(nmi_stats.unknown, handled);
278 return;
279 }
280
281 __this_cpu_add(nmi_stats.unknown, 1);
282
283#ifdef CONFIG_MCA
284 /*
285 * Might actually be able to figure out what the guilty party
286 * is:
287 */
288 if (MCA_bus) {
289 mca_handle_nmi();
290 return;
291 }
292#endif
293 pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
294 reason, smp_processor_id());
295
296 pr_emerg("Do you have a strange power saving mode enabled?\n");
297 if (unknown_nmi_panic || panic_on_unrecovered_nmi)
298 panic("NMI: Not continuing");
299
300 pr_emerg("Dazed and confused, but trying to continue\n");
301}
302
303static DEFINE_PER_CPU(bool, swallow_nmi);
304static DEFINE_PER_CPU(unsigned long, last_nmi_rip);
305
306static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
307{
308 unsigned char reason = 0;
309 int handled;
310 bool b2b = false;
311
312 /*
313 * CPU-specific NMI must be processed before non-CPU-specific
314 * NMI, otherwise we may lose it, because the CPU-specific
315 * NMI can not be detected/processed on other CPUs.
316 */
317
318 /*
319 * Back-to-back NMIs are interesting because they can either
320 * be two NMIs or more than two (anything over two is dropped
321 * due to NMI being edge-triggered). If this is the second half
322 * of the back-to-back NMI, assume we dropped things and process
323 * more handlers. Otherwise reset the 'swallow' NMI behaviour.
324 */
325 if (regs->ip == __this_cpu_read(last_nmi_rip))
326 b2b = true;
327 else
328 __this_cpu_write(swallow_nmi, false);
329
330 __this_cpu_write(last_nmi_rip, regs->ip);
331
332 handled = nmi_handle(NMI_LOCAL, regs, b2b);
333 __this_cpu_add(nmi_stats.normal, handled);
334 if (handled) {
335 /*
336 * There are cases when an NMI handler handles multiple
337 * events in the current NMI. One of these events may
338 * be queued to fire in the next NMI. Because the event is
339 * already handled, the next NMI will result in an unknown
340 * NMI. Instead, let's flag this for a potential NMI to
341 * swallow.
342 */
343 if (handled > 1)
344 __this_cpu_write(swallow_nmi, true);
345 return;
346 }
347
348 /* Non-CPU-specific NMI: NMI sources can be processed on any CPU */
349 raw_spin_lock(&nmi_reason_lock);
350 reason = get_nmi_reason();
351
352 if (reason & NMI_REASON_MASK) {
353 if (reason & NMI_REASON_SERR)
354 pci_serr_error(reason, regs);
355 else if (reason & NMI_REASON_IOCHK)
356 io_check_error(reason, regs);
357#ifdef CONFIG_X86_32
358 /*
359 * Reassert NMI in case it became active
360 * meanwhile as it's edge-triggered:
361 */
362 reassert_nmi();
363#endif
364 __this_cpu_add(nmi_stats.external, 1);
365 raw_spin_unlock(&nmi_reason_lock);
366 return;
367 }
368 raw_spin_unlock(&nmi_reason_lock);
369
370 /*
371 * Only one NMI can be latched at a time. To handle
372 * this we may process multiple NMI handlers at once to
373 * cover the case where an NMI is dropped. The downside
374 * to this approach is we may process an NMI prematurely,
375 * while its real NMI is sitting latched. This will cause
376 * an unknown NMI on the next run of the NMI processing.
377 *
378 * We tried to flag that condition above, by setting the
379 * swallow_nmi flag when we process more than one event.
380 * This condition is also only present on the second half
381 * of a back-to-back NMI, so we flag that condition too.
382 *
383 * If both are true, we assume we already processed this
384 * NMI previously and we swallow it. Otherwise we reset
385 * the logic.
386 *
387 * There are scenarios where we may accidentally swallow
388 * a 'real' unknown NMI. For example, while processing
389 * a perf NMI another perf NMI comes in along with a
390 * 'real' unknown NMI. These two NMIs get combined into
391 * one (as described above). When the next NMI gets
392 * processed, it will be flagged by perf as handled, but
393 * no one will know that a 'real' unknown NMI was sent
394 * as well. As a result it gets swallowed. Or if the first
395 * perf NMI returns two events handled then the second
396 * NMI will get eaten by the logic below, again losing a
397 * 'real' unknown NMI. But this is the best we can do
398 * for now.
399 */
400 if (b2b && __this_cpu_read(swallow_nmi))
401 __this_cpu_add(nmi_stats.swallow, 1);
402 else
403 unknown_nmi_error(reason, regs);
404}
405
406dotraplinkage notrace __kprobes void
407do_nmi(struct pt_regs *regs, long error_code)
408{
409 nmi_enter();
410
411 inc_irq_stat(__nmi_count);
412
413 if (!ignore_nmis)
414 default_do_nmi(regs);
415
416 nmi_exit();
417}
418
419void stop_nmi(void)
420{
421 ignore_nmis++;
422}
423
424void restart_nmi(void)
425{
426 ignore_nmis--;
427}
428
429/* reset the back-to-back NMI logic */
430void local_touch_nmi(void)
431{
432 __this_cpu_write(last_nmi_rip, 0);
433}
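The file above defines the whole of the new API: a handler receives the NMI type and the registers and returns NMI_HANDLED (counted toward the handled total) or NMI_DONE, and register_nmi_handler()/unregister_nmi_handler() attach and detach it from the per-type list by name. A minimal sketch of a client module follows; the mydev_* helpers and the "mydev" name are hypothetical stand-ins, not part of this commit:

#include <linux/module.h>
#include <asm/nmi.h>

/* stubbed device helpers so the sketch is self-contained */
static bool mydev_raised_nmi(void) { return false; }
static void mydev_ack_nmi(void) { }

static int mydev_nmi_handler(unsigned int type, struct pt_regs *regs)
{
	if (!mydev_raised_nmi())
		return NMI_DONE;	/* not ours; let later handlers look */

	mydev_ack_nmi();
	return NMI_HANDLED;		/* one event handled */
}

static int __init mydev_init(void)
{
	/* no NMI_FLAG_FIRST: appended at the tail of the NMI_LOCAL list */
	return register_nmi_handler(NMI_LOCAL, mydev_nmi_handler, 0, "mydev");
}

static void __exit mydev_exit(void)
{
	/* the registration name doubles as the lookup key in __free_nmi() */
	unregister_nmi_handler(NMI_LOCAL, "mydev");
}

module_init(mydev_init);
module_exit(mydev_exit);
MODULE_LICENSE("GPL");	/* register_nmi_handler is EXPORT_SYMBOL_GPL */

The kgdb, kdump and oprofile hunks in this commit are straight conversions to this shape.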
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index e7e3b019c439..b9b3b1a51643 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -49,7 +49,7 @@ void free_thread_xstate(struct task_struct *tsk)
49void free_thread_info(struct thread_info *ti) 49void free_thread_info(struct thread_info *ti)
50{ 50{
51 free_thread_xstate(ti->task); 51 free_thread_xstate(ti->task);
52 free_pages((unsigned long)ti, get_order(THREAD_SIZE)); 52 free_pages((unsigned long)ti, THREAD_ORDER);
53} 53}
54 54
55void arch_task_cache_init(void) 55void arch_task_cache_init(void)
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 2196c703c5e2..795b79f984c2 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -57,6 +57,7 @@
57#include <asm/idle.h> 57#include <asm/idle.h>
58#include <asm/syscalls.h> 58#include <asm/syscalls.h>
59#include <asm/debugreg.h> 59#include <asm/debugreg.h>
60#include <asm/nmi.h>
60 61
61asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); 62asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
62 63
@@ -107,6 +108,7 @@ void cpu_idle(void)
107 if (cpu_is_offline(cpu)) 108 if (cpu_is_offline(cpu))
108 play_dead(); 109 play_dead();
109 110
111 local_touch_nmi();
110 local_irq_disable(); 112 local_irq_disable();
111 /* Don't trace irqs off for idle */ 113 /* Don't trace irqs off for idle */
112 stop_critical_timings(); 114 stop_critical_timings();
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index f693e44e1bf6..3bd7e6eebf31 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -51,6 +51,7 @@
51#include <asm/idle.h> 51#include <asm/idle.h>
52#include <asm/syscalls.h> 52#include <asm/syscalls.h>
53#include <asm/debugreg.h> 53#include <asm/debugreg.h>
54#include <asm/nmi.h>
54 55
55asmlinkage extern void ret_from_fork(void); 56asmlinkage extern void ret_from_fork(void);
56 57
@@ -133,6 +134,7 @@ void cpu_idle(void)
133 * from here on, until they go to idle. 134 * from here on, until they go to idle.
134 * Otherwise, idle callbacks can misfire. 135 * Otherwise, idle callbacks can misfire.
135 */ 136 */
137 local_touch_nmi();
136 local_irq_disable(); 138 local_irq_disable();
137 enter_idle(); 139 enter_idle();
138 /* Don't trace irqs off for idle */ 140 /* Don't trace irqs off for idle */
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 9242436e9937..e334be1182b9 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -464,7 +464,7 @@ static inline void kb_wait(void)
464 } 464 }
465} 465}
466 466
467static void vmxoff_nmi(int cpu, struct die_args *args) 467static void vmxoff_nmi(int cpu, struct pt_regs *regs)
468{ 468{
469 cpu_emergency_vmxoff(); 469 cpu_emergency_vmxoff();
470} 470}
@@ -736,14 +736,10 @@ static nmi_shootdown_cb shootdown_callback;
736 736
737static atomic_t waiting_for_crash_ipi; 737static atomic_t waiting_for_crash_ipi;
738 738
739static int crash_nmi_callback(struct notifier_block *self, 739static int crash_nmi_callback(unsigned int val, struct pt_regs *regs)
740 unsigned long val, void *data)
741{ 740{
742 int cpu; 741 int cpu;
743 742
744 if (val != DIE_NMI)
745 return NOTIFY_OK;
746
747 cpu = raw_smp_processor_id(); 743 cpu = raw_smp_processor_id();
748 744
749 /* Don't do anything if this handler is invoked on crashing cpu. 745 /* Don't do anything if this handler is invoked on crashing cpu.
@@ -751,10 +747,10 @@ static int crash_nmi_callback(struct notifier_block *self,
751 * an NMI if system was initially booted with nmi_watchdog parameter. 747 * an NMI if system was initially booted with nmi_watchdog parameter.
752 */ 748 */
753 if (cpu == crashing_cpu) 749 if (cpu == crashing_cpu)
754 return NOTIFY_STOP; 750 return NMI_HANDLED;
755 local_irq_disable(); 751 local_irq_disable();
756 752
757 shootdown_callback(cpu, (struct die_args *)data); 753 shootdown_callback(cpu, regs);
758 754
759 atomic_dec(&waiting_for_crash_ipi); 755 atomic_dec(&waiting_for_crash_ipi);
760 /* Assume hlt works */ 756 /* Assume hlt works */
@@ -762,7 +758,7 @@ static int crash_nmi_callback(struct notifier_block *self,
762 for (;;) 758 for (;;)
763 cpu_relax(); 759 cpu_relax();
764 760
765 return 1; 761 return NMI_HANDLED;
766} 762}
767 763
768static void smp_send_nmi_allbutself(void) 764static void smp_send_nmi_allbutself(void)
@@ -770,12 +766,6 @@ static void smp_send_nmi_allbutself(void)
770 apic->send_IPI_allbutself(NMI_VECTOR); 766 apic->send_IPI_allbutself(NMI_VECTOR);
771} 767}
772 768
773static struct notifier_block crash_nmi_nb = {
774 .notifier_call = crash_nmi_callback,
775 /* we want to be the first one called */
776 .priority = NMI_LOCAL_HIGH_PRIOR+1,
777};
778
779/* Halt all other CPUs, calling the specified function on each of them 769/* Halt all other CPUs, calling the specified function on each of them
780 * 770 *
781 * This function can be used to halt all other CPUs on crash 771 * This function can be used to halt all other CPUs on crash
@@ -794,7 +784,8 @@ void nmi_shootdown_cpus(nmi_shootdown_cb callback)
794 784
795 atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); 785 atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
796 /* Would it be better to replace the trap vector here? */ 786 /* Would it be better to replace the trap vector here? */
797 if (register_die_notifier(&crash_nmi_nb)) 787 if (register_nmi_handler(NMI_LOCAL, crash_nmi_callback,
788 NMI_FLAG_FIRST, "crash"))
798 return; /* return what? */ 789 return; /* return what? */
799 /* Ensure the new callback function is set before sending 790 /* Ensure the new callback function is set before sending
800 * out the NMI 791 * out the NMI
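The crash path is the one caller that needs ordering: it passes NMI_FLAG_FIRST, which __setup_nmi() in nmi.c above honors with list_add_rcu() at the head of the NMI_LOCAL list, so the shootdown callback sees the NMI before perf, oprofile or any other registrant. A hedged sketch of the same request; shootdown_cb and the "shootdown" name are illustrative:

#include <asm/nmi.h>

static int shootdown_cb(unsigned int type, struct pt_regs *regs)
{
	/* illustrative: a kdump-like client would park the CPU here */
	return NMI_HANDLED;
}

static int __init shootdown_setup(void)
{
	/*
	 * NMI_FLAG_FIRST puts the handler at the head of the NMI_LOCAL
	 * list, ahead of registrants that used the default tail insertion.
	 */
	return register_nmi_handler(NMI_LOCAL, shootdown_cb,
				    NMI_FLAG_FIRST, "shootdown");
}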
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 6913369c234c..a8e3eb83466c 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -81,15 +81,6 @@ gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
81DECLARE_BITMAP(used_vectors, NR_VECTORS); 81DECLARE_BITMAP(used_vectors, NR_VECTORS);
82EXPORT_SYMBOL_GPL(used_vectors); 82EXPORT_SYMBOL_GPL(used_vectors);
83 83
84static int ignore_nmis;
85
86int unknown_nmi_panic;
87/*
88 * Prevent NMI reason port (0x61) being accessed simultaneously, can
89 * only be used in NMI handler.
90 */
91static DEFINE_RAW_SPINLOCK(nmi_reason_lock);
92
93static inline void conditional_sti(struct pt_regs *regs) 84static inline void conditional_sti(struct pt_regs *regs)
94{ 85{
95 if (regs->flags & X86_EFLAGS_IF) 86 if (regs->flags & X86_EFLAGS_IF)
@@ -307,152 +298,6 @@ gp_in_kernel:
307 die("general protection fault", regs, error_code); 298 die("general protection fault", regs, error_code);
308} 299}
309 300
310static int __init setup_unknown_nmi_panic(char *str)
311{
312 unknown_nmi_panic = 1;
313 return 1;
314}
315__setup("unknown_nmi_panic", setup_unknown_nmi_panic);
316
317static notrace __kprobes void
318pci_serr_error(unsigned char reason, struct pt_regs *regs)
319{
320 pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
321 reason, smp_processor_id());
322
323 /*
324 * On some machines, PCI SERR line is used to report memory
325 * errors. EDAC makes use of it.
326 */
327#if defined(CONFIG_EDAC)
328 if (edac_handler_set()) {
329 edac_atomic_assert_error();
330 return;
331 }
332#endif
333
334 if (panic_on_unrecovered_nmi)
335 panic("NMI: Not continuing");
336
337 pr_emerg("Dazed and confused, but trying to continue\n");
338
339 /* Clear and disable the PCI SERR error line. */
340 reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
341 outb(reason, NMI_REASON_PORT);
342}
343
344static notrace __kprobes void
345io_check_error(unsigned char reason, struct pt_regs *regs)
346{
347 unsigned long i;
348
349 pr_emerg(
350 "NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
351 reason, smp_processor_id());
352 show_registers(regs);
353
354 if (panic_on_io_nmi)
355 panic("NMI IOCK error: Not continuing");
356
357 /* Re-enable the IOCK line, wait for a few seconds */
358 reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
359 outb(reason, NMI_REASON_PORT);
360
361 i = 20000;
362 while (--i) {
363 touch_nmi_watchdog();
364 udelay(100);
365 }
366
367 reason &= ~NMI_REASON_CLEAR_IOCHK;
368 outb(reason, NMI_REASON_PORT);
369}
370
371static notrace __kprobes void
372unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
373{
374 if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) ==
375 NOTIFY_STOP)
376 return;
377#ifdef CONFIG_MCA
378 /*
379 * Might actually be able to figure out what the guilty party
380 * is:
381 */
382 if (MCA_bus) {
383 mca_handle_nmi();
384 return;
385 }
386#endif
387 pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
388 reason, smp_processor_id());
389
390 pr_emerg("Do you have a strange power saving mode enabled?\n");
391 if (unknown_nmi_panic || panic_on_unrecovered_nmi)
392 panic("NMI: Not continuing");
393
394 pr_emerg("Dazed and confused, but trying to continue\n");
395}
396
397static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
398{
399 unsigned char reason = 0;
400
401 /*
402 * CPU-specific NMI must be processed before non-CPU-specific
403 * NMI, otherwise we may lose it, because the CPU-specific
404 * NMI can not be detected/processed on other CPUs.
405 */
406 if (notify_die(DIE_NMI, "nmi", regs, 0, 2, SIGINT) == NOTIFY_STOP)
407 return;
408
409 /* Non-CPU-specific NMI: NMI sources can be processed on any CPU */
410 raw_spin_lock(&nmi_reason_lock);
411 reason = get_nmi_reason();
412
413 if (reason & NMI_REASON_MASK) {
414 if (reason & NMI_REASON_SERR)
415 pci_serr_error(reason, regs);
416 else if (reason & NMI_REASON_IOCHK)
417 io_check_error(reason, regs);
418#ifdef CONFIG_X86_32
419 /*
420 * Reassert NMI in case it became active
421 * meanwhile as it's edge-triggered:
422 */
423 reassert_nmi();
424#endif
425 raw_spin_unlock(&nmi_reason_lock);
426 return;
427 }
428 raw_spin_unlock(&nmi_reason_lock);
429
430 unknown_nmi_error(reason, regs);
431}
432
433dotraplinkage notrace __kprobes void
434do_nmi(struct pt_regs *regs, long error_code)
435{
436 nmi_enter();
437
438 inc_irq_stat(__nmi_count);
439
440 if (!ignore_nmis)
441 default_do_nmi(regs);
442
443 nmi_exit();
444}
445
446void stop_nmi(void)
447{
448 ignore_nmis++;
449}
450
451void restart_nmi(void)
452{
453 ignore_nmis--;
454}
455
456/* May run on IST stack. */ 301/* May run on IST stack. */
457dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code) 302dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
458{ 303{
diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
index 9f33b984d0ef..374562ed6704 100644
--- a/arch/x86/lib/insn.c
+++ b/arch/x86/lib/insn.c
@@ -22,14 +22,23 @@
22#include <asm/inat.h> 22#include <asm/inat.h>
23#include <asm/insn.h> 23#include <asm/insn.h>
24 24
25#define get_next(t, insn) \ 25/* Verify that the next sizeof(t) + n bytes lie within the same instruction */
26 ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; }) 26#define validate_next(t, insn, n) \
27 ((insn)->next_byte + sizeof(t) + n - (insn)->kaddr <= MAX_INSN_SIZE)
28
29#define __get_next(t, insn) \
30 ({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
31
32#define __peek_nbyte_next(t, insn, n) \
33 ({ t r = *(t*)((insn)->next_byte + n); r; })
27 34
28#define peek_next(t, insn) \ 35#define get_next(t, insn) \
29 ({t r; r = *(t*)insn->next_byte; r; }) 36 ({ if (unlikely(!validate_next(t, insn, 0))) goto err_out; __get_next(t, insn); })
30 37
31#define peek_nbyte_next(t, insn, n) \ 38#define peek_nbyte_next(t, insn, n) \
32 ({t r; r = *(t*)((insn)->next_byte + n); r; }) 39 ({ if (unlikely(!validate_next(t, insn, n))) goto err_out; __peek_nbyte_next(t, insn, n); })
40
41#define peek_next(t, insn) peek_nbyte_next(t, insn, 0)
33 42
34/** 43/**
35 * insn_init() - initialize struct insn 44 * insn_init() - initialize struct insn
@@ -158,6 +167,8 @@ vex_end:
158 insn->vex_prefix.got = 1; 167 insn->vex_prefix.got = 1;
159 168
160 prefixes->got = 1; 169 prefixes->got = 1;
170
171err_out:
161 return; 172 return;
162} 173}
163 174
@@ -208,6 +219,9 @@ void insn_get_opcode(struct insn *insn)
208 insn->attr = 0; /* This instruction is bad */ 219 insn->attr = 0; /* This instruction is bad */
209end: 220end:
210 opcode->got = 1; 221 opcode->got = 1;
222
223err_out:
224 return;
211} 225}
212 226
213/** 227/**
@@ -241,6 +255,9 @@ void insn_get_modrm(struct insn *insn)
241 if (insn->x86_64 && inat_is_force64(insn->attr)) 255 if (insn->x86_64 && inat_is_force64(insn->attr))
242 insn->opnd_bytes = 8; 256 insn->opnd_bytes = 8;
243 modrm->got = 1; 257 modrm->got = 1;
258
259err_out:
260 return;
244} 261}
245 262
246 263
@@ -290,6 +307,9 @@ void insn_get_sib(struct insn *insn)
290 } 307 }
291 } 308 }
292 insn->sib.got = 1; 309 insn->sib.got = 1;
310
311err_out:
312 return;
293} 313}
294 314
295 315
@@ -351,6 +371,9 @@ void insn_get_displacement(struct insn *insn)
351 } 371 }
352out: 372out:
353 insn->displacement.got = 1; 373 insn->displacement.got = 1;
374
375err_out:
376 return;
354} 377}
355 378
356/* Decode moffset16/32/64 */ 379/* Decode moffset16/32/64 */
@@ -373,6 +396,9 @@ static void __get_moffset(struct insn *insn)
373 break; 396 break;
374 } 397 }
375 insn->moffset1.got = insn->moffset2.got = 1; 398 insn->moffset1.got = insn->moffset2.got = 1;
399
400err_out:
401 return;
376} 402}
377 403
378/* Decode imm v32(Iz) */ 404/* Decode imm v32(Iz) */
@@ -389,6 +415,9 @@ static void __get_immv32(struct insn *insn)
389 insn->immediate.nbytes = 4; 415 insn->immediate.nbytes = 4;
390 break; 416 break;
391 } 417 }
418
419err_out:
420 return;
392} 421}
393 422
394/* Decode imm v64(Iv/Ov) */ 423/* Decode imm v64(Iv/Ov) */
@@ -411,6 +440,9 @@ static void __get_immv(struct insn *insn)
411 break; 440 break;
412 } 441 }
413 insn->immediate1.got = insn->immediate2.got = 1; 442 insn->immediate1.got = insn->immediate2.got = 1;
443
444err_out:
445 return;
414} 446}
415 447
416/* Decode ptr16:16/32(Ap) */ 448/* Decode ptr16:16/32(Ap) */
@@ -432,6 +464,9 @@ static void __get_immptr(struct insn *insn)
432 insn->immediate2.value = get_next(unsigned short, insn); 464 insn->immediate2.value = get_next(unsigned short, insn);
433 insn->immediate2.nbytes = 2; 465 insn->immediate2.nbytes = 2;
434 insn->immediate1.got = insn->immediate2.got = 1; 466 insn->immediate1.got = insn->immediate2.got = 1;
467
468err_out:
469 return;
435} 470}
436 471
437/** 472/**
@@ -496,6 +531,9 @@ void insn_get_immediate(struct insn *insn)
496 } 531 }
497done: 532done:
498 insn->immediate.got = 1; 533 insn->immediate.got = 1;
534
535err_out:
536 return;
499} 537}
500 538
501/** 539/**
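Every fetch in the decoder now funnels through validate_next(), and each function gained a local err_out label to bail to when a read would run past MAX_INSN_SIZE. The same validate-then-read idiom in a standalone userspace sketch, assuming GNU statement expressions; the names and the 15-byte cap standing in for MAX_INSN_SIZE are illustrative:

#include <stdio.h>
#include <string.h>

#define MAX_BUF 15	/* stand-in for MAX_INSN_SIZE */

struct cursor {
	const unsigned char *start;	/* plays the role of insn->kaddr */
	const unsigned char *next;	/* plays the role of insn->next_byte */
};

/* may the next sizeof(t) + n bytes still be read from the same buffer? */
#define validate_next(t, c, n) \
	((c)->next + sizeof(t) + (n) - (c)->start <= MAX_BUF)

/* bail to the caller's err_out label instead of reading out of bounds */
#define get_next(t, c) \
	({ t r; if (!validate_next(t, c, 0)) goto err_out; \
	   memcpy(&r, (c)->next, sizeof(t)); (c)->next += sizeof(t); r; })

static int parse(const unsigned char *buf)
{
	struct cursor c = { .start = buf, .next = buf };
	unsigned char op = get_next(unsigned char, &c);
	unsigned int imm = get_next(unsigned int, &c);

	printf("op=%#x imm=%#x\n", op, imm);
	return 0;

err_out:
	return -1;	/* fetch would have run past the buffer */
}

int main(void)
{
	unsigned char ok[MAX_BUF] = { 0xb8, 0x78, 0x56, 0x34, 0x12 };

	return parse(ok);
}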
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 0d17c8c50acd..9c7378df740a 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -420,12 +420,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
420 return 0; 420 return 0;
421} 421}
422 422
423#ifdef CONFIG_CPU_SUP_AMD
423static const char errata93_warning[] = 424static const char errata93_warning[] =
424KERN_ERR 425KERN_ERR
425"******* Your BIOS seems to not contain a fix for K8 errata #93\n" 426"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
426"******* Working around it, but it may cause SEGVs or burn power.\n" 427"******* Working around it, but it may cause SEGVs or burn power.\n"
427"******* Please consider a BIOS update.\n" 428"******* Please consider a BIOS update.\n"
428"******* Disabling USB legacy in the BIOS may also help.\n"; 429"******* Disabling USB legacy in the BIOS may also help.\n";
430#endif
429 431
430/* 432/*
431 * No vm86 mode in 64-bit mode: 433 * No vm86 mode in 64-bit mode:
@@ -505,7 +507,11 @@ bad:
505 */ 507 */
506static int is_errata93(struct pt_regs *regs, unsigned long address) 508static int is_errata93(struct pt_regs *regs, unsigned long address)
507{ 509{
508#ifdef CONFIG_X86_64 510#if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD)
511 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD
512 || boot_cpu_data.x86 != 0xf)
513 return 0;
514
509 if (address != regs->ip) 515 if (address != regs->ip)
510 return 0; 516 return 0;
511 517
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 96646b3aeca8..75f9528e0372 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -61,26 +61,15 @@ u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
61} 61}
62 62
63 63
64static int profile_exceptions_notify(struct notifier_block *self, 64static int profile_exceptions_notify(unsigned int val, struct pt_regs *regs)
65 unsigned long val, void *data)
66{ 65{
67 struct die_args *args = (struct die_args *)data; 66 if (ctr_running)
68 int ret = NOTIFY_DONE; 67 model->check_ctrs(regs, &__get_cpu_var(cpu_msrs));
69 68 else if (!nmi_enabled)
70 switch (val) { 69 return NMI_DONE;
71 case DIE_NMI: 70 else
72 if (ctr_running) 71 model->stop(&__get_cpu_var(cpu_msrs));
73 model->check_ctrs(args->regs, &__get_cpu_var(cpu_msrs)); 72 return NMI_HANDLED;
74 else if (!nmi_enabled)
75 break;
76 else
77 model->stop(&__get_cpu_var(cpu_msrs));
78 ret = NOTIFY_STOP;
79 break;
80 default:
81 break;
82 }
83 return ret;
84} 73}
85 74
86static void nmi_cpu_save_registers(struct op_msrs *msrs) 75static void nmi_cpu_save_registers(struct op_msrs *msrs)
@@ -363,12 +352,6 @@ static void nmi_cpu_setup(void *dummy)
363 apic_write(APIC_LVTPC, APIC_DM_NMI); 352 apic_write(APIC_LVTPC, APIC_DM_NMI);
364} 353}
365 354
366static struct notifier_block profile_exceptions_nb = {
367 .notifier_call = profile_exceptions_notify,
368 .next = NULL,
369 .priority = NMI_LOCAL_LOW_PRIOR,
370};
371
372static void nmi_cpu_restore_registers(struct op_msrs *msrs) 355static void nmi_cpu_restore_registers(struct op_msrs *msrs)
373{ 356{
374 struct op_msr *counters = msrs->counters; 357 struct op_msr *counters = msrs->counters;
@@ -402,8 +385,6 @@ static void nmi_cpu_shutdown(void *dummy)
402 apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu)); 385 apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
403 apic_write(APIC_LVTERR, v); 386 apic_write(APIC_LVTERR, v);
404 nmi_cpu_restore_registers(msrs); 387 nmi_cpu_restore_registers(msrs);
405 if (model->cpu_down)
406 model->cpu_down();
407} 388}
408 389
409static void nmi_cpu_up(void *dummy) 390static void nmi_cpu_up(void *dummy)
@@ -508,7 +489,8 @@ static int nmi_setup(void)
508 ctr_running = 0; 489 ctr_running = 0;
509 /* make variables visible to the nmi handler: */ 490 /* make variables visible to the nmi handler: */
510 smp_mb(); 491 smp_mb();
511 err = register_die_notifier(&profile_exceptions_nb); 492 err = register_nmi_handler(NMI_LOCAL, profile_exceptions_notify,
493 0, "oprofile");
512 if (err) 494 if (err)
513 goto fail; 495 goto fail;
514 496
@@ -538,7 +520,7 @@ static void nmi_shutdown(void)
538 put_online_cpus(); 520 put_online_cpus();
539 /* make variables visible to the nmi handler: */ 521 /* make variables visible to the nmi handler: */
540 smp_mb(); 522 smp_mb();
541 unregister_die_notifier(&profile_exceptions_nb); 523 unregister_nmi_handler(NMI_LOCAL, "oprofile");
542 msrs = &get_cpu_var(cpu_msrs); 524 msrs = &get_cpu_var(cpu_msrs);
543 model->shutdown(msrs); 525 model->shutdown(msrs);
544 free_msrs(); 526 free_msrs();
diff --git a/arch/x86/oprofile/nmi_timer_int.c b/arch/x86/oprofile/nmi_timer_int.c
index 720bf5a53c51..7f8052cd6620 100644
--- a/arch/x86/oprofile/nmi_timer_int.c
+++ b/arch/x86/oprofile/nmi_timer_int.c
@@ -18,32 +18,16 @@
18#include <asm/apic.h> 18#include <asm/apic.h>
19#include <asm/ptrace.h> 19#include <asm/ptrace.h>
20 20
21static int profile_timer_exceptions_notify(struct notifier_block *self, 21static int profile_timer_exceptions_notify(unsigned int val, struct pt_regs *regs)
22 unsigned long val, void *data)
23{ 22{
24 struct die_args *args = (struct die_args *)data; 23 oprofile_add_sample(regs, 0);
25 int ret = NOTIFY_DONE; 24 return NMI_HANDLED;
26
27 switch (val) {
28 case DIE_NMI:
29 oprofile_add_sample(args->regs, 0);
30 ret = NOTIFY_STOP;
31 break;
32 default:
33 break;
34 }
35 return ret;
36} 25}
37 26
38static struct notifier_block profile_timer_exceptions_nb = {
39 .notifier_call = profile_timer_exceptions_notify,
40 .next = NULL,
41 .priority = NMI_LOW_PRIOR,
42};
43
44static int timer_start(void) 27static int timer_start(void)
45{ 28{
46 if (register_die_notifier(&profile_timer_exceptions_nb)) 29 if (register_nmi_handler(NMI_LOCAL, profile_timer_exceptions_notify,
30 0, "oprofile-timer"))
47 return 1; 31 return 1;
48 return 0; 32 return 0;
49} 33}
@@ -51,7 +35,7 @@ static int timer_start(void)
51 35
52static void timer_stop(void) 36static void timer_stop(void)
53{ 37{
54 unregister_die_notifier(&profile_timer_exceptions_nb); 38 unregister_nmi_handler(NMI_LOCAL, "oprofile-timer");
55 synchronize_sched(); /* Allow already-started NMIs to complete. */ 39 synchronize_sched(); /* Allow already-started NMIs to complete. */
56} 40}
57 41
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 9cbb710dc94b..303f08637826 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -29,8 +29,6 @@
29#include "op_x86_model.h" 29#include "op_x86_model.h"
30#include "op_counter.h" 30#include "op_counter.h"
31 31
32#define NUM_COUNTERS 4
33#define NUM_COUNTERS_F15H 6
34#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX 32#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
35#define NUM_VIRT_COUNTERS 32 33#define NUM_VIRT_COUNTERS 32
36#else 34#else
@@ -70,62 +68,12 @@ static struct ibs_config ibs_config;
70static struct ibs_state ibs_state; 68static struct ibs_state ibs_state;
71 69
72/* 70/*
73 * IBS cpuid feature detection
74 */
75
76#define IBS_CPUID_FEATURES 0x8000001b
77
78/*
79 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
80 * bit 0 is used to indicate the existence of IBS.
81 */
82#define IBS_CAPS_AVAIL (1U<<0)
83#define IBS_CAPS_FETCHSAM (1U<<1)
84#define IBS_CAPS_OPSAM (1U<<2)
85#define IBS_CAPS_RDWROPCNT (1U<<3)
86#define IBS_CAPS_OPCNT (1U<<4)
87#define IBS_CAPS_BRNTRGT (1U<<5)
88#define IBS_CAPS_OPCNTEXT (1U<<6)
89
90#define IBS_CAPS_DEFAULT (IBS_CAPS_AVAIL \
91 | IBS_CAPS_FETCHSAM \
92 | IBS_CAPS_OPSAM)
93
94/*
95 * IBS APIC setup
96 */
97#define IBSCTL 0x1cc
98#define IBSCTL_LVT_OFFSET_VALID (1ULL<<8)
99#define IBSCTL_LVT_OFFSET_MASK 0x0F
100
101/*
102 * IBS randomization macros 71 * IBS randomization macros
103 */ 72 */
104#define IBS_RANDOM_BITS 12 73#define IBS_RANDOM_BITS 12
105#define IBS_RANDOM_MASK ((1ULL << IBS_RANDOM_BITS) - 1) 74#define IBS_RANDOM_MASK ((1ULL << IBS_RANDOM_BITS) - 1)
106#define IBS_RANDOM_MAXCNT_OFFSET (1ULL << (IBS_RANDOM_BITS - 5)) 75#define IBS_RANDOM_MAXCNT_OFFSET (1ULL << (IBS_RANDOM_BITS - 5))
107 76
108static u32 get_ibs_caps(void)
109{
110 u32 ibs_caps;
111 unsigned int max_level;
112
113 if (!boot_cpu_has(X86_FEATURE_IBS))
114 return 0;
115
116 /* check IBS cpuid feature flags */
117 max_level = cpuid_eax(0x80000000);
118 if (max_level < IBS_CPUID_FEATURES)
119 return IBS_CAPS_DEFAULT;
120
121 ibs_caps = cpuid_eax(IBS_CPUID_FEATURES);
122 if (!(ibs_caps & IBS_CAPS_AVAIL))
123 /* cpuid flags not valid */
124 return IBS_CAPS_DEFAULT;
125
126 return ibs_caps;
127}
128
129/* 77/*
130 * 16-bit Linear Feedback Shift Register (LFSR) 78 * 16-bit Linear Feedback Shift Register (LFSR)
131 * 79 *
@@ -316,81 +264,6 @@ static void op_amd_stop_ibs(void)
316 wrmsrl(MSR_AMD64_IBSOPCTL, 0); 264 wrmsrl(MSR_AMD64_IBSOPCTL, 0);
317} 265}
318 266
319static inline int get_eilvt(int offset)
320{
321 return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1);
322}
323
324static inline int put_eilvt(int offset)
325{
326 return !setup_APIC_eilvt(offset, 0, 0, 1);
327}
328
329static inline int ibs_eilvt_valid(void)
330{
331 int offset;
332 u64 val;
333 int valid = 0;
334
335 preempt_disable();
336
337 rdmsrl(MSR_AMD64_IBSCTL, val);
338 offset = val & IBSCTL_LVT_OFFSET_MASK;
339
340 if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
341 pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
342 smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
343 goto out;
344 }
345
346 if (!get_eilvt(offset)) {
347 pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
348 smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
349 goto out;
350 }
351
352 valid = 1;
353out:
354 preempt_enable();
355
356 return valid;
357}
358
359static inline int get_ibs_offset(void)
360{
361 u64 val;
362
363 rdmsrl(MSR_AMD64_IBSCTL, val);
364 if (!(val & IBSCTL_LVT_OFFSET_VALID))
365 return -EINVAL;
366
367 return val & IBSCTL_LVT_OFFSET_MASK;
368}
369
370static void setup_APIC_ibs(void)
371{
372 int offset;
373
374 offset = get_ibs_offset();
375 if (offset < 0)
376 goto failed;
377
378 if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0))
379 return;
380failed:
381 pr_warn("oprofile: IBS APIC setup failed on cpu #%d\n",
382 smp_processor_id());
383}
384
385static void clear_APIC_ibs(void)
386{
387 int offset;
388
389 offset = get_ibs_offset();
390 if (offset >= 0)
391 setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
392}
393
394#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX 267#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
395 268
396static void op_mux_switch_ctrl(struct op_x86_model_spec const *model, 269static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
@@ -439,7 +312,7 @@ static int op_amd_fill_in_addresses(struct op_msrs * const msrs)
439 goto fail; 312 goto fail;
440 } 313 }
441 /* both registers must be reserved */ 314 /* both registers must be reserved */
442 if (num_counters == NUM_COUNTERS_F15H) { 315 if (num_counters == AMD64_NUM_COUNTERS_F15H) {
443 msrs->counters[i].addr = MSR_F15H_PERF_CTR + (i << 1); 316 msrs->counters[i].addr = MSR_F15H_PERF_CTR + (i << 1);
444 msrs->controls[i].addr = MSR_F15H_PERF_CTL + (i << 1); 317 msrs->controls[i].addr = MSR_F15H_PERF_CTL + (i << 1);
445 } else { 318 } else {
@@ -504,15 +377,6 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
504 val |= op_x86_get_ctrl(model, &counter_config[virt]); 377 val |= op_x86_get_ctrl(model, &counter_config[virt]);
505 wrmsrl(msrs->controls[i].addr, val); 378 wrmsrl(msrs->controls[i].addr, val);
506 } 379 }
507
508 if (ibs_caps)
509 setup_APIC_ibs();
510}
511
512static void op_amd_cpu_shutdown(void)
513{
514 if (ibs_caps)
515 clear_APIC_ibs();
516} 380}
517 381
518static int op_amd_check_ctrs(struct pt_regs * const regs, 382static int op_amd_check_ctrs(struct pt_regs * const regs,
@@ -575,86 +439,6 @@ static void op_amd_stop(struct op_msrs const * const msrs)
575 op_amd_stop_ibs(); 439 op_amd_stop_ibs();
576} 440}
577 441
578static int setup_ibs_ctl(int ibs_eilvt_off)
579{
580 struct pci_dev *cpu_cfg;
581 int nodes;
582 u32 value = 0;
583
584 nodes = 0;
585 cpu_cfg = NULL;
586 do {
587 cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
588 PCI_DEVICE_ID_AMD_10H_NB_MISC,
589 cpu_cfg);
590 if (!cpu_cfg)
591 break;
592 ++nodes;
593 pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
594 | IBSCTL_LVT_OFFSET_VALID);
595 pci_read_config_dword(cpu_cfg, IBSCTL, &value);
596 if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) {
597 pci_dev_put(cpu_cfg);
598 printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
599 "IBSCTL = 0x%08x\n", value);
600 return -EINVAL;
601 }
602 } while (1);
603
604 if (!nodes) {
605 printk(KERN_DEBUG "No CPU node configured for IBS\n");
606 return -ENODEV;
607 }
608
609 return 0;
610}
611
612/*
613 * This runs only on the current cpu. We try to find an LVT offset and
614 * setup the local APIC. For this we must disable preemption. On
615 * success we initialize all nodes with this offset. This updates then
616 * the offset in the IBS_CTL per-node msr. The per-core APIC setup of
617 * the IBS interrupt vector is called from op_amd_setup_ctrs()/op_-
618 * amd_cpu_shutdown() using the new offset.
619 */
620static int force_ibs_eilvt_setup(void)
621{
622 int offset;
623 int ret;
624
625 preempt_disable();
626 /* find the next free available EILVT entry, skip offset 0 */
627 for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
628 if (get_eilvt(offset))
629 break;
630 }
631 preempt_enable();
632
633 if (offset == APIC_EILVT_NR_MAX) {
634 printk(KERN_DEBUG "No EILVT entry available\n");
635 return -EBUSY;
636 }
637
638 ret = setup_ibs_ctl(offset);
639 if (ret)
640 goto out;
641
642 if (!ibs_eilvt_valid()) {
643 ret = -EFAULT;
644 goto out;
645 }
646
647 pr_err(FW_BUG "using offset %d for IBS interrupts\n", offset);
648 pr_err(FW_BUG "workaround enabled for IBS LVT offset\n");
649
650 return 0;
651out:
652 preempt_disable();
653 put_eilvt(offset);
654 preempt_enable();
655 return ret;
656}
657
658/* 442/*
659 * check and reserve APIC extended interrupt LVT offset for IBS if 443 * check and reserve APIC extended interrupt LVT offset for IBS if
660 * available 444 * available
@@ -667,17 +451,6 @@ static void init_ibs(void)
667 if (!ibs_caps) 451 if (!ibs_caps)
668 return; 452 return;
669 453
670 if (ibs_eilvt_valid())
671 goto out;
672
673 if (!force_ibs_eilvt_setup())
674 goto out;
675
676 /* Failed to setup ibs */
677 ibs_caps = 0;
678 return;
679
680out:
681 printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps); 454 printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps);
682} 455}
683 456
@@ -741,9 +514,9 @@ static int op_amd_init(struct oprofile_operations *ops)
741 ops->create_files = setup_ibs_files; 514 ops->create_files = setup_ibs_files;
742 515
743 if (boot_cpu_data.x86 == 0x15) { 516 if (boot_cpu_data.x86 == 0x15) {
744 num_counters = NUM_COUNTERS_F15H; 517 num_counters = AMD64_NUM_COUNTERS_F15H;
745 } else { 518 } else {
746 num_counters = NUM_COUNTERS; 519 num_counters = AMD64_NUM_COUNTERS;
747 } 520 }
748 521
749 op_amd_spec.num_counters = num_counters; 522 op_amd_spec.num_counters = num_counters;
@@ -760,7 +533,6 @@ struct op_x86_model_spec op_amd_spec = {
760 .init = op_amd_init, 533 .init = op_amd_init,
761 .fill_in_addresses = &op_amd_fill_in_addresses, 534 .fill_in_addresses = &op_amd_fill_in_addresses,
762 .setup_ctrs = &op_amd_setup_ctrs, 535 .setup_ctrs = &op_amd_setup_ctrs,
763 .cpu_down = &op_amd_cpu_shutdown,
764 .check_ctrs = &op_amd_check_ctrs, 536 .check_ctrs = &op_amd_check_ctrs,
765 .start = &op_amd_start, 537 .start = &op_amd_start,
766 .stop = &op_amd_stop, 538 .stop = &op_amd_stop,
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index 94b745045e45..d90528ea5412 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -28,7 +28,7 @@ static int counter_width = 32;
28 28
29#define MSR_PPRO_EVENTSEL_RESERVED ((0xFFFFFFFFULL<<32)|(1ULL<<21)) 29#define MSR_PPRO_EVENTSEL_RESERVED ((0xFFFFFFFFULL<<32)|(1ULL<<21))
30 30
31static u64 *reset_value; 31static u64 reset_value[OP_MAX_COUNTER];
32 32
33static void ppro_shutdown(struct op_msrs const * const msrs) 33static void ppro_shutdown(struct op_msrs const * const msrs)
34{ 34{
@@ -40,10 +40,6 @@ static void ppro_shutdown(struct op_msrs const * const msrs)
40 release_perfctr_nmi(MSR_P6_PERFCTR0 + i); 40 release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
41 release_evntsel_nmi(MSR_P6_EVNTSEL0 + i); 41 release_evntsel_nmi(MSR_P6_EVNTSEL0 + i);
42 } 42 }
43 if (reset_value) {
44 kfree(reset_value);
45 reset_value = NULL;
46 }
47} 43}
48 44
49static int ppro_fill_in_addresses(struct op_msrs * const msrs) 45static int ppro_fill_in_addresses(struct op_msrs * const msrs)
@@ -79,13 +75,6 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
79 u64 val; 75 u64 val;
80 int i; 76 int i;
81 77
82 if (!reset_value) {
83 reset_value = kzalloc(sizeof(reset_value[0]) * num_counters,
84 GFP_ATOMIC);
85 if (!reset_value)
86 return;
87 }
88
89 if (cpu_has_arch_perfmon) { 78 if (cpu_has_arch_perfmon) {
90 union cpuid10_eax eax; 79 union cpuid10_eax eax;
91 eax.full = cpuid_eax(0xa); 80 eax.full = cpuid_eax(0xa);
@@ -141,13 +130,6 @@ static int ppro_check_ctrs(struct pt_regs * const regs,
141 u64 val; 130 u64 val;
142 int i; 131 int i;
143 132
144 /*
145 * This can happen if perf counters are in use when
146 * we steal the die notifier NMI.
147 */
148 if (unlikely(!reset_value))
149 goto out;
150
151 for (i = 0; i < num_counters; ++i) { 133 for (i = 0; i < num_counters; ++i) {
152 if (!reset_value[i]) 134 if (!reset_value[i])
153 continue; 135 continue;
@@ -158,7 +140,6 @@ static int ppro_check_ctrs(struct pt_regs * const regs,
158 wrmsrl(msrs->counters[i].addr, -reset_value[i]); 140 wrmsrl(msrs->counters[i].addr, -reset_value[i]);
159 } 141 }
160 142
161out:
162 /* Only P6 based Pentium M need to re-unmask the apic vector but it 143 /* Only P6 based Pentium M need to re-unmask the apic vector but it
163 * doesn't hurt other P6 variant */ 144 * doesn't hurt other P6 variant */
164 apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED); 145 apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);
@@ -179,8 +160,6 @@ static void ppro_start(struct op_msrs const * const msrs)
179 u64 val; 160 u64 val;
180 int i; 161 int i;
181 162
182 if (!reset_value)
183 return;
184 for (i = 0; i < num_counters; ++i) { 163 for (i = 0; i < num_counters; ++i) {
185 if (reset_value[i]) { 164 if (reset_value[i]) {
186 rdmsrl(msrs->controls[i].addr, val); 165 rdmsrl(msrs->controls[i].addr, val);
@@ -196,8 +175,6 @@ static void ppro_stop(struct op_msrs const * const msrs)
196 u64 val; 175 u64 val;
197 int i; 176 int i;
198 177
199 if (!reset_value)
200 return;
201 for (i = 0; i < num_counters; ++i) { 178 for (i = 0; i < num_counters; ++i) {
202 if (!reset_value[i]) 179 if (!reset_value[i])
203 continue; 180 continue;
@@ -242,7 +219,7 @@ static void arch_perfmon_setup_counters(void)
242 eax.split.bit_width = 40; 219 eax.split.bit_width = 40;
243 } 220 }
244 221
245 num_counters = eax.split.num_counters; 222 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
246 223
247 op_arch_perfmon_spec.num_counters = num_counters; 224 op_arch_perfmon_spec.num_counters = num_counters;
248 op_arch_perfmon_spec.num_controls = num_counters; 225 op_arch_perfmon_spec.num_controls = num_counters;
diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
index 89017fa1fd63..71e8a67337e2 100644
--- a/arch/x86/oprofile/op_x86_model.h
+++ b/arch/x86/oprofile/op_x86_model.h
@@ -43,7 +43,6 @@ struct op_x86_model_spec {
43 int (*fill_in_addresses)(struct op_msrs * const msrs); 43 int (*fill_in_addresses)(struct op_msrs * const msrs);
44 void (*setup_ctrs)(struct op_x86_model_spec const *model, 44 void (*setup_ctrs)(struct op_x86_model_spec const *model,
45 struct op_msrs const * const msrs); 45 struct op_msrs const * const msrs);
46 void (*cpu_down)(void);
47 int (*check_ctrs)(struct pt_regs * const regs, 46 int (*check_ctrs)(struct pt_regs * const regs,
48 struct op_msrs const * const msrs); 47 struct op_msrs const * const msrs);
49 void (*start)(struct op_msrs const * const msrs); 48 void (*start)(struct op_msrs const * const msrs);
diff --git a/arch/xtensa/configs/iss_defconfig b/arch/xtensa/configs/iss_defconfig
index 0234cd198c54..f932b30b47fb 100644
--- a/arch/xtensa/configs/iss_defconfig
+++ b/arch/xtensa/configs/iss_defconfig
@@ -15,7 +15,6 @@ CONFIG_GENERIC_GPIO=y
15# CONFIG_ARCH_HAS_ILOG2_U64 is not set 15# CONFIG_ARCH_HAS_ILOG2_U64 is not set
16CONFIG_NO_IOPORT=y 16CONFIG_NO_IOPORT=y
17CONFIG_HZ=100 17CONFIG_HZ=100
18CONFIG_GENERIC_TIME=y
19CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 18CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
20CONFIG_CONSTRUCTORS=y 19CONFIG_CONSTRUCTORS=y
21 20
diff --git a/arch/xtensa/configs/s6105_defconfig b/arch/xtensa/configs/s6105_defconfig
index 4891abbf16bc..550e8ed5b5c6 100644
--- a/arch/xtensa/configs/s6105_defconfig
+++ b/arch/xtensa/configs/s6105_defconfig
@@ -15,7 +15,6 @@ CONFIG_GENERIC_GPIO=y
15# CONFIG_ARCH_HAS_ILOG2_U64 is not set 15# CONFIG_ARCH_HAS_ILOG2_U64 is not set
16CONFIG_NO_IOPORT=y 16CONFIG_NO_IOPORT=y
17CONFIG_HZ=100 17CONFIG_HZ=100
18CONFIG_GENERIC_TIME=y
19CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 18CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
20 19
21# 20#