 arch/alpha/Kconfig                   |   1
 arch/alpha/include/asm/perf_event.h  |   5
 arch/alpha/kernel/time.c             |  30
 arch/arm/Kconfig                     |   1
 arch/arm/include/asm/perf_event.h    |  12
 arch/arm/kernel/perf_event.c         |   8
 arch/frv/Kconfig                     |   1
 arch/frv/lib/Makefile                |   2
 arch/frv/lib/perf_event.c            |  19
 arch/ia64/include/asm/hardirq.h      |  11
 arch/parisc/Kconfig                  |   1
 arch/parisc/include/asm/perf_event.h |   3
 arch/powerpc/Kconfig                 |   1
 arch/powerpc/include/asm/paca.h      |   2
 arch/powerpc/kernel/perf_event.c     |   2
 arch/powerpc/kernel/time.c           |  42
 arch/s390/Kconfig                    |   1
 arch/s390/include/asm/hardirq.h      |   4
 arch/s390/include/asm/perf_event.h   |   3
 arch/sh/Kconfig                      |   1
 arch/sh/include/asm/perf_event.h     |   7
 arch/sparc/Kconfig                   |   2
 arch/sparc/include/asm/perf_event.h  |   4
 arch/sparc/kernel/pcr.c              |   8
 arch/x86/Kconfig                     |   1
 arch/x86/include/asm/entry_arch.h    |   4
 arch/x86/include/asm/hardirq.h       |   2
 arch/x86/include/asm/hw_irq.h        |   2
 arch/x86/include/asm/irq_vectors.h   |   4
 arch/x86/kernel/Makefile             |   1
 arch/x86/kernel/cpu/perf_event.c     |  19
 arch/x86/kernel/cpu/perf_event_amd.c |   4
 arch/x86/kernel/entry_64.S           |   6
 arch/x86/kernel/irq.c                |   8
 arch/x86/kernel/irq_work.c           |  30
 arch/x86/kernel/irqinit.c            |   6
 include/linux/irq_work.h             |  20
 include/linux/jump_label.h           |  18
 include/linux/jump_label_ref.h       |  44
 include/linux/perf_event.h           |  57
 init/Kconfig                         |   8
 kernel/Makefile                      |   2
 kernel/hw_breakpoint.c               |   8
 kernel/irq_work.c                    | 164
 kernel/perf_event.c                  | 274
 kernel/timer.c                       |   7
 kernel/tracepoint.c                  |   6
 lib/dynamic_debug.c                  |   4
 scripts/Makefile.build               |   8
 49 files changed, 548 insertions(+), 330 deletions(-)
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index b9647bb66d13..d04ccd73af45 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -9,6 +9,7 @@ config ALPHA
 	select HAVE_IDE
 	select HAVE_OPROFILE
 	select HAVE_SYSCALL_WRAPPERS
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select HAVE_DMA_ATTRS
 	help
diff --git a/arch/alpha/include/asm/perf_event.h b/arch/alpha/include/asm/perf_event.h
index 4157cd3c44a9..fe792ca818f6 100644
--- a/arch/alpha/include/asm/perf_event.h
+++ b/arch/alpha/include/asm/perf_event.h
@@ -1,11 +1,6 @@
 #ifndef __ASM_ALPHA_PERF_EVENT_H
 #define __ASM_ALPHA_PERF_EVENT_H
 
-/* Alpha only supports software events through this interface. */
-extern void set_perf_event_pending(void);
-
-#define PERF_EVENT_INDEX_OFFSET 0
-
 #ifdef CONFIG_PERF_EVENTS
 extern void init_hw_perf_events(void);
 #else
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index 396af1799ea4..0f1d8493cfca 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -41,7 +41,7 @@
 #include <linux/init.h>
 #include <linux/bcd.h>
 #include <linux/profile.h>
-#include <linux/perf_event.h>
+#include <linux/irq_work.h>
 
 #include <asm/uaccess.h>
 #include <asm/io.h>
@@ -83,25 +83,25 @@ static struct {
 
 unsigned long est_cycle_freq;
 
-#ifdef CONFIG_PERF_EVENTS
+#ifdef CONFIG_IRQ_WORK
 
-DEFINE_PER_CPU(u8, perf_event_pending);
+DEFINE_PER_CPU(u8, irq_work_pending);
 
-#define set_perf_event_pending_flag()	__get_cpu_var(perf_event_pending) = 1
-#define test_perf_event_pending()	__get_cpu_var(perf_event_pending)
-#define clear_perf_event_pending()	__get_cpu_var(perf_event_pending) = 0
+#define set_irq_work_pending_flag()	__get_cpu_var(irq_work_pending) = 1
+#define test_irq_work_pending()		__get_cpu_var(irq_work_pending)
+#define clear_irq_work_pending()	__get_cpu_var(irq_work_pending) = 0
 
-void set_perf_event_pending(void)
+void set_irq_work_pending(void)
 {
-	set_perf_event_pending_flag();
+	set_irq_work_pending_flag();
 }
 
-#else  /* CONFIG_PERF_EVENTS */
+#else  /* CONFIG_IRQ_WORK */
 
-#define test_perf_event_pending()	0
-#define clear_perf_event_pending()
+#define test_irq_work_pending()		0
+#define clear_irq_work_pending()
 
-#endif /* CONFIG_PERF_EVENTS */
+#endif /* CONFIG_IRQ_WORK */
 
 
 static inline __u32 rpcc(void)
@@ -191,9 +191,9 @@ irqreturn_t timer_interrupt(int irq, void *dev)
 
 	write_sequnlock(&xtime_lock);
 
-	if (test_perf_event_pending()) {
-		clear_perf_event_pending();
-		perf_event_do_pending();
+	if (test_irq_work_pending()) {
+		clear_irq_work_pending();
+		irq_work_run();
 	}
 
 #ifndef CONFIG_SMP
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 88c97bc7a6f5..7c0dfccd05bd 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -23,6 +23,7 @@ config ARM
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_LZO
 	select HAVE_KERNEL_LZMA
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select PERF_USE_VMALLOC
 	select HAVE_REGS_AND_STACK_ACCESS_API
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index b5799a3b7117..c4aa4e8c6af9 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -12,18 +12,6 @@
 #ifndef __ARM_PERF_EVENT_H__
 #define __ARM_PERF_EVENT_H__
 
-/*
- * NOP: on *most* (read: all supported) ARM platforms, the performance
- * counter interrupts are regular interrupts and not an NMI. This
- * means that when we receive the interrupt we can call
- * perf_event_do_pending() that handles all of the work with
- * interrupts disabled.
- */
-static inline void
-set_perf_event_pending(void)
-{
-}
-
 /* ARM performance counters start from 1 (in the cp15 accesses) so use the
  * same indexes here for consistency. */
 #define PERF_EVENT_INDEX_OFFSET 1
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 6cc6521881aa..49643b1467e6 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -1092,7 +1092,7 @@ armv6pmu_handle_irq(int irq_num,
 	 * platforms that can have the PMU interrupts raised as an NMI, this
 	 * will not work.
 	 */
-	perf_event_do_pending();
+	irq_work_run();
 
 	return IRQ_HANDLED;
 }
@@ -2068,7 +2068,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 	 * platforms that can have the PMU interrupts raised as an NMI, this
 	 * will not work.
 	 */
-	perf_event_do_pending();
+	irq_work_run();
 
 	return IRQ_HANDLED;
 }
@@ -2436,7 +2436,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 			armpmu->disable(hwc, idx);
 	}
 
-	perf_event_do_pending();
+	irq_work_run();
 
 	/*
 	 * Re-enable the PMU.
@@ -2763,7 +2763,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 			armpmu->disable(hwc, idx);
 	}
 
-	perf_event_do_pending();
+	irq_work_run();
 
 	/*
 	 * Re-enable the PMU.
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index 16399bd24993..0f2417df6323 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -7,6 +7,7 @@ config FRV
 	default y
 	select HAVE_IDE
 	select HAVE_ARCH_TRACEHOOK
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 
 config ZONE_DMA
diff --git a/arch/frv/lib/Makefile b/arch/frv/lib/Makefile
index f4709756d0d9..4ff2fb1e6b16 100644
--- a/arch/frv/lib/Makefile
+++ b/arch/frv/lib/Makefile
@@ -5,4 +5,4 @@
 lib-y := \
 	__ashldi3.o __lshrdi3.o __muldi3.o __ashrdi3.o __negdi2.o __ucmpdi2.o \
 	checksum.o memcpy.o memset.o atomic-ops.o atomic64-ops.o \
-	outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o perf_event.o
+	outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o
diff --git a/arch/frv/lib/perf_event.c b/arch/frv/lib/perf_event.c
deleted file mode 100644
index 9ac5acfd2e91..000000000000
--- a/arch/frv/lib/perf_event.c
+++ /dev/null
@@ -1,19 +0,0 @@
-/* Performance event handling
- *
- * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-
-#include <linux/perf_event.h>
-
-/*
- * mark the performance event as pending
- */
-void set_perf_event_pending(void)
-{
-}
diff --git a/arch/ia64/include/asm/hardirq.h b/arch/ia64/include/asm/hardirq.h
index d514cd9edb49..8fb7d33a661f 100644
--- a/arch/ia64/include/asm/hardirq.h
+++ b/arch/ia64/include/asm/hardirq.h
@@ -6,12 +6,6 @@
  *	David Mosberger-Tang <davidm@hpl.hp.com>
  */
 
-
-#include <linux/threads.h>
-#include <linux/irq.h>
-
-#include <asm/processor.h>
-
 /*
  * No irq_cpustat_t for IA-64.  The data is held in the per-CPU data structure.
  */
@@ -20,6 +14,11 @@
 
 #define local_softirq_pending()		(local_cpu_data->softirq_pending)
 
+#include <linux/threads.h>
+#include <linux/irq.h>
+
+#include <asm/processor.h>
+
 extern void __iomem *ipi_base_addr;
 
 void ack_bad_irq(unsigned int irq);
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 907417d187e1..79a04a9394d5 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -16,6 +16,7 @@ config PARISC
 	select RTC_DRV_GENERIC
 	select INIT_ALL_POSSIBLE
 	select BUG
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select GENERIC_ATOMIC64 if !64BIT
 	help
diff --git a/arch/parisc/include/asm/perf_event.h b/arch/parisc/include/asm/perf_event.h
index cc146427d8f9..1e0fd8ba6c03 100644
--- a/arch/parisc/include/asm/perf_event.h
+++ b/arch/parisc/include/asm/perf_event.h
@@ -1,7 +1,6 @@
 #ifndef __ASM_PARISC_PERF_EVENT_H
 #define __ASM_PARISC_PERF_EVENT_H
 
-/* parisc only supports software events through this interface. */
-static inline void set_perf_event_pending(void) { }
+/* Empty, just to avoid compiling error */
 
 #endif /* __ASM_PARISC_PERF_EVENT_H */
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 631e5a0fb6ab..4b1e521d966f 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -138,6 +138,7 @@ config PPC
 	select HAVE_OPROFILE
 	select HAVE_SYSCALL_WRAPPERS if PPC64
 	select GENERIC_ATOMIC64 if PPC32
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_HW_BREAKPOINT if PERF_EVENTS && PPC_BOOK3S_64
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 1ff6662f7faf..9b287fdd8ea3 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -129,7 +129,7 @@ struct paca_struct {
 	u8 soft_enabled;		/* irq soft-enable flag */
 	u8 hard_enabled;		/* set if irqs are enabled in MSR */
 	u8 io_sync;			/* writel() needs spin_unlock sync */
-	u8 perf_event_pending;		/* PM interrupt while soft-disabled */
+	u8 irq_work_pending;		/* IRQ_WORK interrupt while soft-disabled */
 
 	/* Stuff for accurate time accounting */
 	u64 user_time;			/* accumulated usermode TB ticks */
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 9cb4924b6c07..3129c855933c 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -1092,7 +1092,7 @@ static int power_pmu_event_init(struct perf_event *event)
 	 * XXX we should check if the task is an idle task.
 	 */
 	flags = 0;
-	if (event->ctx->task)
+	if (event->attach_state & PERF_ATTACH_TASK)
 		flags |= PPMU_ONLY_COUNT_RUN;
 
 	/*
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 8533b3b83f5d..54888eb10c3b 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -53,7 +53,7 @@
 #include <linux/posix-timers.h>
 #include <linux/irq.h>
 #include <linux/delay.h>
-#include <linux/perf_event.h>
+#include <linux/irq_work.h>
 #include <asm/trace.h>
 
 #include <asm/io.h>
@@ -493,60 +493,60 @@ void __init iSeries_time_init_early(void)
 }
 #endif /* CONFIG_PPC_ISERIES */
 
-#ifdef CONFIG_PERF_EVENTS
+#ifdef CONFIG_IRQ_WORK
 
 /*
  * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
  */
 #ifdef CONFIG_PPC64
-static inline unsigned long test_perf_event_pending(void)
+static inline unsigned long test_irq_work_pending(void)
 {
 	unsigned long x;
 
 	asm volatile("lbz %0,%1(13)"
 	: "=r" (x)
-	: "i" (offsetof(struct paca_struct, perf_event_pending)));
+	: "i" (offsetof(struct paca_struct, irq_work_pending)));
 	return x;
 }
 
-static inline void set_perf_event_pending_flag(void)
+static inline void set_irq_work_pending_flag(void)
 {
 	asm volatile("stb %0,%1(13)" : :
 		"r" (1),
-		"i" (offsetof(struct paca_struct, perf_event_pending)));
+		"i" (offsetof(struct paca_struct, irq_work_pending)));
 }
 
-static inline void clear_perf_event_pending(void)
+static inline void clear_irq_work_pending(void)
 {
 	asm volatile("stb %0,%1(13)" : :
 		"r" (0),
-		"i" (offsetof(struct paca_struct, perf_event_pending)));
+		"i" (offsetof(struct paca_struct, irq_work_pending)));
 }
 
 #else /* 32-bit */
 
-DEFINE_PER_CPU(u8, perf_event_pending);
+DEFINE_PER_CPU(u8, irq_work_pending);
 
-#define set_perf_event_pending_flag()	__get_cpu_var(perf_event_pending) = 1
-#define test_perf_event_pending()	__get_cpu_var(perf_event_pending)
-#define clear_perf_event_pending()	__get_cpu_var(perf_event_pending) = 0
+#define set_irq_work_pending_flag()	__get_cpu_var(irq_work_pending) = 1
+#define test_irq_work_pending()		__get_cpu_var(irq_work_pending)
+#define clear_irq_work_pending()	__get_cpu_var(irq_work_pending) = 0
 
 #endif /* 32 vs 64 bit */
 
-void set_perf_event_pending(void)
+void set_irq_work_pending(void)
 {
 	preempt_disable();
-	set_perf_event_pending_flag();
+	set_irq_work_pending_flag();
 	set_dec(1);
 	preempt_enable();
 }
 
-#else  /* CONFIG_PERF_EVENTS */
+#else  /* CONFIG_IRQ_WORK */
 
-#define test_perf_event_pending()	0
-#define clear_perf_event_pending()
+#define test_irq_work_pending()		0
+#define clear_irq_work_pending()
 
-#endif /* CONFIG_PERF_EVENTS */
+#endif /* CONFIG_IRQ_WORK */
 
 /*
  * For iSeries shared processors, we have to let the hypervisor
@@ -587,9 +587,9 @@ void timer_interrupt(struct pt_regs * regs)
 
 	calculate_steal_time();
 
-	if (test_perf_event_pending()) {
-		clear_perf_event_pending();
-		perf_event_do_pending();
+	if (test_irq_work_pending()) {
+		clear_irq_work_pending();
+		irq_work_run();
 	}
 
 #ifdef CONFIG_PPC_ISERIES
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index f0777a47e3a5..958f0dadeadf 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -95,6 +95,7 @@ config S390
 	select HAVE_KVM if 64BIT
 	select HAVE_ARCH_TRACEHOOK
 	select INIT_ALL_POSSIBLE
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_BZIP2
diff --git a/arch/s390/include/asm/hardirq.h b/arch/s390/include/asm/hardirq.h
index 498bc3892385..881d94590aeb 100644
--- a/arch/s390/include/asm/hardirq.h
+++ b/arch/s390/include/asm/hardirq.h
@@ -12,10 +12,6 @@
 #ifndef __ASM_HARDIRQ_H
 #define __ASM_HARDIRQ_H
 
-#include <linux/threads.h>
-#include <linux/sched.h>
-#include <linux/cache.h>
-#include <linux/interrupt.h>
 #include <asm/lowcore.h>
 
 #define local_softirq_pending() (S390_lowcore.softirq_pending)
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
index 3840cbe77637..a75f168d2718 100644
--- a/arch/s390/include/asm/perf_event.h
+++ b/arch/s390/include/asm/perf_event.h
@@ -4,7 +4,6 @@
  * Copyright 2009 Martin Schwidefsky, IBM Corporation.
  */
 
-static inline void set_perf_event_pending(void) {}
-static inline void clear_perf_event_pending(void) {}
+/* Empty, just to avoid compiling error */
 
 #define PERF_EVENT_INDEX_OFFSET 0
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 35b6c3f85173..35b6879628a0 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -16,6 +16,7 @@ config SUPERH
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_DMA_API_DEBUG
 	select HAVE_DMA_ATTRS
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select PERF_USE_VMALLOC
 	select HAVE_KERNEL_GZIP
diff --git a/arch/sh/include/asm/perf_event.h b/arch/sh/include/asm/perf_event.h
index 3d0c9f36d150..14308bed7ea5 100644
--- a/arch/sh/include/asm/perf_event.h
+++ b/arch/sh/include/asm/perf_event.h
@@ -26,11 +26,4 @@ extern int register_sh_pmu(struct sh_pmu *);
 extern int reserve_pmc_hardware(void);
 extern void release_pmc_hardware(void);
 
-static inline void set_perf_event_pending(void)
-{
-	/* Nothing to see here, move along. */
-}
-
-#define PERF_EVENT_INDEX_OFFSET 0
-
 #endif /* __ASM_SH_PERF_EVENT_H */
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 9212cd42a832..3e9d31401fb2 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -26,6 +26,7 @@ config SPARC
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select RTC_CLASS
 	select RTC_DRV_M48T59
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select PERF_USE_VMALLOC
 	select HAVE_DMA_ATTRS
@@ -54,6 +55,7 @@ config SPARC64
 	select RTC_DRV_BQ4802
 	select RTC_DRV_SUN4V
 	select RTC_DRV_STARFIRE
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select PERF_USE_VMALLOC
 
diff --git a/arch/sparc/include/asm/perf_event.h b/arch/sparc/include/asm/perf_event.h
index 727af70646cb..6e8bfa1786da 100644
--- a/arch/sparc/include/asm/perf_event.h
+++ b/arch/sparc/include/asm/perf_event.h
@@ -1,10 +1,6 @@
 #ifndef __ASM_SPARC_PERF_EVENT_H
 #define __ASM_SPARC_PERF_EVENT_H
 
-extern void set_perf_event_pending(void);
-
-#define PERF_EVENT_INDEX_OFFSET 0
-
 #ifdef CONFIG_PERF_EVENTS
 #include <asm/ptrace.h>
 
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index c4a6a50b4849..b87873c0e8ea 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -7,7 +7,7 @@
 #include <linux/init.h>
 #include <linux/irq.h>
 
-#include <linux/perf_event.h>
+#include <linux/irq_work.h>
 #include <linux/ftrace.h>
 
 #include <asm/pil.h>
@@ -43,14 +43,14 @@ void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs)
 
 	old_regs = set_irq_regs(regs);
 	irq_enter();
-#ifdef CONFIG_PERF_EVENTS
-	perf_event_do_pending();
+#ifdef CONFIG_IRQ_WORK
+	irq_work_run();
 #endif
 	irq_exit();
 	set_irq_regs(old_regs);
 }
 
-void set_perf_event_pending(void)
+void arch_irq_work_raise(void)
 {
 	set_softint(1 << PIL_DEFERRED_PCR_WORK);
 }
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 9815221976a7..fd227d6b8d9c 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -25,6 +25,7 @@ config X86
 	select HAVE_IDE
 	select HAVE_OPROFILE
 	select HAVE_PERF_EVENTS if (!M386 && !M486)
+	select HAVE_IRQ_WORK
 	select HAVE_IOREMAP_PROT
 	select HAVE_KPROBES
 	select ARCH_WANT_OPTIONAL_GPIOLIB
diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h
index 8e8ec663a98f..b8e96a18676b 100644
--- a/arch/x86/include/asm/entry_arch.h
+++ b/arch/x86/include/asm/entry_arch.h
@@ -49,8 +49,8 @@ BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
 BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
 BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
 
-#ifdef CONFIG_PERF_EVENTS
-BUILD_INTERRUPT(perf_pending_interrupt, LOCAL_PENDING_VECTOR)
+#ifdef CONFIG_IRQ_WORK
+BUILD_INTERRUPT(irq_work_interrupt, IRQ_WORK_VECTOR)
 #endif
 
 #ifdef CONFIG_X86_THERMAL_VECTOR
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index aeab29aee617..55e4de613f0e 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -14,7 +14,7 @@ typedef struct {
 #endif
 	unsigned int x86_platform_ipis;	/* arch dependent */
 	unsigned int apic_perf_irqs;
-	unsigned int apic_pending_irqs;
+	unsigned int apic_irq_work_irqs;
 #ifdef CONFIG_SMP
 	unsigned int irq_resched_count;
 	unsigned int irq_call_count;
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 46c0fe05f230..3a54a1ca1a02 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -29,7 +29,7 @@
 extern void apic_timer_interrupt(void);
 extern void x86_platform_ipi(void);
 extern void error_interrupt(void);
-extern void perf_pending_interrupt(void);
+extern void irq_work_interrupt(void);
 
 extern void spurious_interrupt(void);
 extern void thermal_interrupt(void);
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index e2ca30092557..6af0894dafb4 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -114,9 +114,9 @@
 #define X86_PLATFORM_IPI_VECTOR		0xed
 
 /*
- * Performance monitoring pending work vector:
+ * IRQ work vector:
  */
-#define LOCAL_PENDING_VECTOR		0xec
+#define IRQ_WORK_VECTOR			0xec
 
 #define UV_BAU_MESSAGE			0xea
 
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 9d3f485e5dd0..7490bf8d1459 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -35,6 +35,7 @@ obj-y := process_$(BITS).o signal.o entry_$(BITS).o
 obj-y			+= traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
 obj-y			+= time.o ioport.o ldt.o dumpstack.o
 obj-y			+= setup.o x86_init.o i8259.o irqinit.o jump_label.o
+obj-$(CONFIG_IRQ_WORK)	+= irq_work.o
 obj-$(CONFIG_X86_VISWS)	+= visws_quirks.o
 obj-$(CONFIG_X86_32)	+= probe_roms_32.o
 obj-$(CONFIG_X86_32)	+= sys_i386_32.o i386_ksyms_32.o
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index e2513f26ba8b..fe73c1844a9a 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1196,25 +1196,6 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
 	return handled;
 }
 
-void smp_perf_pending_interrupt(struct pt_regs *regs)
-{
-	irq_enter();
-	ack_APIC_irq();
-	inc_irq_stat(apic_pending_irqs);
-	perf_event_do_pending();
-	irq_exit();
-}
-
-void set_perf_event_pending(void)
-{
-#ifdef CONFIG_X86_LOCAL_APIC
-	if (!x86_pmu.apic || !x86_pmu_initialized())
-		return;
-
-	apic->send_IPI_self(LOCAL_PENDING_VECTOR);
-#endif
-}
-
 void perf_events_lapic_init(void)
 {
 	if (!x86_pmu.apic || !x86_pmu_initialized())
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index c2897b7b4a3b..46d58448c3af 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -52,7 +52,7 @@ static __initconst const u64 amd_hw_cache_event_ids
  [ C(DTLB) ] = {
 	[ C(OP_READ) ] = {
 		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
-		[ C(RESULT_MISS)   ] = 0x0046, /* L1 DTLB and L2 DLTB Miss */
+		[ C(RESULT_MISS)   ] = 0x0746, /* L1_DTLB_AND_L2_DLTB_MISS.ALL */
 	},
 	[ C(OP_WRITE) ] = {
 		[ C(RESULT_ACCESS) ] = 0,
@@ -66,7 +66,7 @@ static __initconst const u64 amd_hw_cache_event_ids
  [ C(ITLB) ] = {
 	[ C(OP_READ) ] = {
 		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fecthes */
-		[ C(RESULT_MISS)   ] = 0x0085, /* Instr. fetch ITLB misses */
+		[ C(RESULT_MISS)   ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
 	},
 	[ C(OP_WRITE) ] = {
 		[ C(RESULT_ACCESS) ] = -1,
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 17be5ec7cbba..c375c79065f8 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1023,9 +1023,9 @@ apicinterrupt ERROR_APIC_VECTOR \
 apicinterrupt SPURIOUS_APIC_VECTOR \
 	spurious_interrupt smp_spurious_interrupt
 
-#ifdef CONFIG_PERF_EVENTS
-apicinterrupt LOCAL_PENDING_VECTOR \
-	perf_pending_interrupt smp_perf_pending_interrupt
+#ifdef CONFIG_IRQ_WORK
+apicinterrupt IRQ_WORK_VECTOR \
+	irq_work_interrupt smp_irq_work_interrupt
 #endif
 
 /*
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 91fd0c70a18a..44edb03fc9ec 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -67,10 +67,10 @@ static int show_other_interrupts(struct seq_file *p, int prec)
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
 	seq_printf(p, "  Performance monitoring interrupts\n");
-	seq_printf(p, "%*s: ", prec, "PND");
+	seq_printf(p, "%*s: ", prec, "IWI");
 	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", irq_stats(j)->apic_pending_irqs);
-	seq_printf(p, "  Performance pending work\n");
+		seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
+	seq_printf(p, "  IRQ work interrupts\n");
 #endif
 	if (x86_platform_ipi_callback) {
 		seq_printf(p, "%*s: ", prec, "PLT");
@@ -185,7 +185,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
 	sum += irq_stats(cpu)->apic_timer_irqs;
 	sum += irq_stats(cpu)->irq_spurious_count;
 	sum += irq_stats(cpu)->apic_perf_irqs;
-	sum += irq_stats(cpu)->apic_pending_irqs;
+	sum += irq_stats(cpu)->apic_irq_work_irqs;
 #endif
 	if (x86_platform_ipi_callback)
 		sum += irq_stats(cpu)->x86_platform_ipis;
diff --git a/arch/x86/kernel/irq_work.c b/arch/x86/kernel/irq_work.c
new file mode 100644
index 000000000000..ca8f703a1e70
--- /dev/null
+++ b/arch/x86/kernel/irq_work.c
@@ -0,0 +1,30 @@
+/*
+ * x86 specific code for irq_work
+ *
+ * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/irq_work.h>
+#include <linux/hardirq.h>
+#include <asm/apic.h>
+
+void smp_irq_work_interrupt(struct pt_regs *regs)
+{
+	irq_enter();
+	ack_APIC_irq();
+	inc_irq_stat(apic_irq_work_irqs);
+	irq_work_run();
+	irq_exit();
+}
+
+void arch_irq_work_raise(void)
+{
+#ifdef CONFIG_X86_LOCAL_APIC
+	if (!cpu_has_apic)
+		return;
+
+	apic->send_IPI_self(IRQ_WORK_VECTOR);
+	apic_wait_icr_idle();
+#endif
+}
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 990ae7cfc578..713969b9266b 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -224,9 +224,9 @@ static void __init apic_intr_init(void)
 	alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
 	alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
 
-	/* Performance monitoring interrupts: */
-# ifdef CONFIG_PERF_EVENTS
-	alloc_intr_gate(LOCAL_PENDING_VECTOR, perf_pending_interrupt);
+	/* IRQ work interrupts: */
+# ifdef CONFIG_IRQ_WORK
+	alloc_intr_gate(IRQ_WORK_VECTOR, irq_work_interrupt);
 # endif
 
 #endif
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
new file mode 100644
index 000000000000..4fa09d4d0b71
--- /dev/null
+++ b/include/linux/irq_work.h
@@ -0,0 +1,20 @@
+#ifndef _LINUX_IRQ_WORK_H
+#define _LINUX_IRQ_WORK_H
+
+struct irq_work {
+	struct irq_work *next;
+	void (*func)(struct irq_work *);
+};
+
+static inline
+void init_irq_work(struct irq_work *entry, void (*func)(struct irq_work *))
+{
+	entry->next = NULL;
+	entry->func = func;
+}
+
+bool irq_work_queue(struct irq_work *entry);
+void irq_work_run(void);
+void irq_work_sync(struct irq_work *entry);
+
+#endif /* _LINUX_IRQ_WORK_H */
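The header above is the entire public interface: embed a struct irq_work in your own object, point it at a callback, and queue it from any context, including NMI. A minimal usage sketch (not part of this commit; all names here are hypothetical):

	#include <linux/kernel.h>
	#include <linux/irq_work.h>

	/* Runs later from hardirq context, where IRQs are legal again. */
	static void my_work_func(struct irq_work *work)
	{
		pr_info("deferred out of NMI context\n");
	}

	static struct irq_work my_work;

	static void my_setup(void)
	{
		init_irq_work(&my_work, my_work_func);
	}

	/* NMI-safe: enqueueing is a lock-free cmpxchg() loop. */
	static void my_nmi_handler(void)
	{
		irq_work_queue(&my_work);	/* returns false if already pending */
	}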
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index b72cd9f92c2e..b67cb180e6e9 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -25,10 +25,10 @@ extern void jump_label_update(unsigned long key, enum jump_label_type type);
 extern void jump_label_apply_nops(struct module *mod);
 extern int jump_label_text_reserved(void *start, void *end);
 
-#define enable_jump_label(key) \
+#define jump_label_enable(key) \
 	jump_label_update((unsigned long)key, JUMP_LABEL_ENABLE);
 
-#define disable_jump_label(key) \
+#define jump_label_disable(key) \
 	jump_label_update((unsigned long)key, JUMP_LABEL_DISABLE);
 
 #else
@@ -39,12 +39,12 @@ do { \
 	goto label;				\
 } while (0)
 
-#define enable_jump_label(cond_var)	\
+#define jump_label_enable(cond_var)	\
 do {					\
 	*(cond_var) = 1;		\
 } while (0)
 
-#define disable_jump_label(cond_var)	\
+#define jump_label_disable(cond_var)	\
 do {					\
 	*(cond_var) = 0;		\
 } while (0)
@@ -61,4 +61,14 @@ static inline int jump_label_text_reserved(void *start, void *end)
 
 #endif
 
+#define COND_STMT(key, stmt)					\
+do {								\
+	__label__ jl_enabled;					\
+	JUMP_LABEL(key, jl_enabled);				\
+	if (0) {						\
+jl_enabled:							\
+		stmt;						\
+	}							\
+} while (0)
+
 #endif
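COND_STMT() uses JUMP_LABEL() to move a statement out of line so the common path costs nothing while the key is disabled; without jump-label support it degrades to an ordinary conditional. Roughly, with a hypothetical key and function (not from this commit):

	COND_STMT(&my_key, my_slow_path());

	/* behaves approximately like, minus the fast-path load and branch: */
	if (unlikely(my_key_enabled))
		my_slow_path();

The perf_event.h changes below use exactly this to gate the scheduler hooks on perf_task_events.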
diff --git a/include/linux/jump_label_ref.h b/include/linux/jump_label_ref.h
new file mode 100644
index 000000000000..e5d012ad92c6
--- /dev/null
+++ b/include/linux/jump_label_ref.h
@@ -0,0 +1,44 @@
+#ifndef _LINUX_JUMP_LABEL_REF_H
+#define _LINUX_JUMP_LABEL_REF_H
+
+#include <linux/jump_label.h>
+#include <asm/atomic.h>
+
+#ifdef HAVE_JUMP_LABEL
+
+static inline void jump_label_inc(atomic_t *key)
+{
+	if (atomic_add_return(1, key) == 1)
+		jump_label_enable(key);
+}
+
+static inline void jump_label_dec(atomic_t *key)
+{
+	if (atomic_dec_and_test(key))
+		jump_label_disable(key);
+}
+
+#else /* !HAVE_JUMP_LABEL */
+
+static inline void jump_label_inc(atomic_t *key)
+{
+	atomic_inc(key);
+}
+
+static inline void jump_label_dec(atomic_t *key)
+{
+	atomic_dec(key);
+}
+
+#undef JUMP_LABEL
+#define JUMP_LABEL(key, label)						\
+do {									\
+	if (unlikely(__builtin_choose_expr(				\
+	      __builtin_types_compatible_p(typeof(key), atomic_t *),	\
+	      atomic_read((atomic_t *)(key)), *(key))))			\
+		goto label;						\
+} while (0)
+
+#endif /* HAVE_JUMP_LABEL */
+
+#endif /* _LINUX_JUMP_LABEL_REF_H */
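This layers reference counting on a jump-label key: the 0->1 transition enables the branch, the 1->0 transition disables it, and every other transition is a plain atomic op. A sketch of the intended pattern (hypothetical names; the perf changes below apply it to perf_task_events):

	static atomic_t my_feature_key;	/* serves as both key and refcount */

	static void my_feature_get(void)
	{
		jump_label_inc(&my_feature_key);	/* 0 -> 1 patches the branch in */
	}

	static void my_feature_put(void)
	{
		jump_label_dec(&my_feature_key);	/* 1 -> 0 patches it back out */
	}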
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index a9227e985207..057bf22a8323 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -486,6 +486,8 @@ struct perf_guest_info_callbacks {
 #include <linux/workqueue.h>
 #include <linux/ftrace.h>
 #include <linux/cpu.h>
+#include <linux/irq_work.h>
+#include <linux/jump_label_ref.h>
 #include <asm/atomic.h>
 #include <asm/local.h>
 
@@ -535,6 +537,12 @@ struct hw_perf_event {
 		struct { /* breakpoint */
 			struct arch_hw_breakpoint	info;
 			struct list_head		bp_list;
+			/*
+			 * Crufty hack to avoid the chicken and egg
+			 * problem hw_breakpoint has with context
+			 * creation and event initialization.
+			 */
+			struct task_struct		*bp_target;
 		};
 #endif
 	};
@@ -672,11 +680,6 @@ struct perf_buffer {
 	void				*data_pages[0];
 };
 
-struct perf_pending_entry {
-	struct perf_pending_entry *next;
-	void (*func)(struct perf_pending_entry *);
-};
-
 struct perf_sample_data;
 
 typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
@@ -697,6 +700,7 @@ struct swevent_hlist {
 
 #define PERF_ATTACH_CONTEXT	0x01
 #define PERF_ATTACH_GROUP	0x02
+#define PERF_ATTACH_TASK	0x04
 
 /**
  * struct perf_event - performance event kernel representation:
@@ -784,7 +788,7 @@ struct perf_event {
 	int				pending_wakeup;
 	int				pending_kill;
 	int				pending_disable;
-	struct perf_pending_entry	pending;
+	struct irq_work			pending;
 
 	atomic_t			event_limit;
 
@@ -892,14 +896,26 @@ extern void perf_pmu_unregister(struct pmu *pmu);
 
 extern int perf_num_counters(void);
 extern const char *perf_pmu_name(void);
-extern void perf_event_task_sched_in(struct task_struct *task);
-extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
+extern void __perf_event_task_sched_in(struct task_struct *task);
+extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
+
+extern atomic_t perf_task_events;
+
+static inline void perf_event_task_sched_in(struct task_struct *task)
+{
+	COND_STMT(&perf_task_events, __perf_event_task_sched_in(task));
+}
+
+static inline
+void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
+{
+	COND_STMT(&perf_task_events, __perf_event_task_sched_out(task, next));
+}
+
 extern int perf_event_init_task(struct task_struct *child);
 extern void perf_event_exit_task(struct task_struct *child);
 extern void perf_event_free_task(struct task_struct *task);
 extern void perf_event_delayed_put(struct task_struct *task);
-extern void set_perf_event_pending(void);
-extern void perf_event_do_pending(void);
 extern void perf_event_print_debug(void);
 extern void perf_pmu_disable(struct pmu *pmu);
 extern void perf_pmu_enable(struct pmu *pmu);
@@ -988,18 +1004,20 @@ static inline void perf_fetch_caller_regs(struct pt_regs *regs)
 	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
 }
 
-static inline void
+static __always_inline void
 perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
 {
-	if (atomic_read(&perf_swevent_enabled[event_id])) {
-		struct pt_regs hot_regs;
-
-		if (!regs) {
-			perf_fetch_caller_regs(&hot_regs);
-			regs = &hot_regs;
-		}
-		__perf_sw_event(event_id, nr, nmi, regs, addr);
-	}
+	struct pt_regs hot_regs;
+
+	JUMP_LABEL(&perf_swevent_enabled[event_id], have_event);
+	return;
+
+have_event:
+	if (!regs) {
+		perf_fetch_caller_regs(&hot_regs);
+		regs = &hot_regs;
+	}
+	__perf_sw_event(event_id, nr, nmi, regs, addr);
 }
 
 extern void perf_event_mmap(struct vm_area_struct *vma);
@@ -1078,7 +1096,6 @@ static inline int perf_event_init_task(struct task_struct *child) { return 0; }
 static inline void perf_event_exit_task(struct task_struct *child)	{ }
 static inline void perf_event_free_task(struct task_struct *task)	{ }
 static inline void perf_event_delayed_put(struct task_struct *task)	{ }
-static inline void perf_event_do_pending(void)				{ }
 static inline void perf_event_print_debug(void)			{ }
 static inline int perf_event_task_disable(void)			{ return -EINVAL; }
 static inline int perf_event_task_enable(void)				{ return -EINVAL; }
diff --git a/init/Kconfig b/init/Kconfig
index 2de5b1cbadd9..1ef0b439908e 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -21,6 +21,13 @@ config CONSTRUCTORS
 	depends on !UML
 	default y
 
+config HAVE_IRQ_WORK
+	bool
+
+config IRQ_WORK
+	bool
+	depends on HAVE_IRQ_WORK
+
 menu "General setup"
 
 config EXPERIMENTAL
@@ -987,6 +994,7 @@ config PERF_EVENTS
 	default y if (PROFILING || PERF_COUNTERS)
 	depends on HAVE_PERF_EVENTS
 	select ANON_INODES
+	select IRQ_WORK
 	help
 	  Enable kernel support for various performance events provided
 	  by software and hardware.
diff --git a/kernel/Makefile b/kernel/Makefile
index d52b473c99a1..4d9bf5f8531f 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -23,6 +23,7 @@ CFLAGS_REMOVE_rtmutex-debug.o = -pg
 CFLAGS_REMOVE_cgroup-debug.o = -pg
 CFLAGS_REMOVE_sched_clock.o = -pg
 CFLAGS_REMOVE_perf_event.o = -pg
+CFLAGS_REMOVE_irq_work.o = -pg
 endif
 
 obj-$(CONFIG_FREEZER) += freezer.o
@@ -100,6 +101,7 @@ obj-$(CONFIG_TRACING) += trace/
 obj-$(CONFIG_X86_DS) += trace/
 obj-$(CONFIG_RING_BUFFER) += trace/
 obj-$(CONFIG_SMP) += sched_cpupri.o
+obj-$(CONFIG_IRQ_WORK) += irq_work.o
 obj-$(CONFIG_PERF_EVENTS) += perf_event.o
 obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
 obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index 3b714e839c10..2c9120f0afca 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -113,12 +113,12 @@ static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
  */
 static int task_bp_pinned(struct perf_event *bp, enum bp_type_idx type)
 {
-	struct perf_event_context *ctx = bp->ctx;
+	struct task_struct *tsk = bp->hw.bp_target;
 	struct perf_event *iter;
 	int count = 0;
 
 	list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
-		if (iter->ctx == ctx && find_slot_idx(iter) == type)
+		if (iter->hw.bp_target == tsk && find_slot_idx(iter) == type)
 			count += hw_breakpoint_weight(iter);
 	}
 
@@ -134,7 +134,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
 		    enum bp_type_idx type)
 {
 	int cpu = bp->cpu;
-	struct task_struct *tsk = bp->ctx->task;
+	struct task_struct *tsk = bp->hw.bp_target;
 
 	if (cpu >= 0) {
 		slots->pinned = per_cpu(nr_cpu_bp_pinned[type], cpu);
@@ -213,7 +213,7 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
 	       int weight)
 {
 	int cpu = bp->cpu;
-	struct task_struct *tsk = bp->ctx->task;
+	struct task_struct *tsk = bp->hw.bp_target;
 
 	/* Pinned counter cpu profiling */
 	if (!tsk) {
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
new file mode 100644
index 000000000000..f16763ff8481
--- /dev/null
+++ b/kernel/irq_work.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *
+ * Provides a framework for enqueueing and running callbacks from hardirq
+ * context. The enqueueing is NMI-safe.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/irq_work.h>
+#include <linux/hardirq.h>
+
+/*
+ * An entry can be in one of four states:
+ *
+ * free	     NULL, 0 -> {claimed}       : free to be used
+ * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
+ * pending   next, 3 -> {busy}          : queued, pending callback
+ * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
+ *
+ * We use the lower two bits of the next pointer to keep PENDING and BUSY
+ * flags.
+ */
+
+#define IRQ_WORK_PENDING	1UL
+#define IRQ_WORK_BUSY		2UL
+#define IRQ_WORK_FLAGS		3UL
+
+static inline bool irq_work_is_set(struct irq_work *entry, int flags)
+{
+	return (unsigned long)entry->next & flags;
+}
+
+static inline struct irq_work *irq_work_next(struct irq_work *entry)
+{
+	unsigned long next = (unsigned long)entry->next;
+	next &= ~IRQ_WORK_FLAGS;
+	return (struct irq_work *)next;
+}
+
+static inline struct irq_work *next_flags(struct irq_work *entry, int flags)
+{
+	unsigned long next = (unsigned long)entry;
+	next |= flags;
+	return (struct irq_work *)next;
+}
+
+static DEFINE_PER_CPU(struct irq_work *, irq_work_list);
+
+/*
+ * Claim the entry so that no one else will poke at it.
+ */
+static bool irq_work_claim(struct irq_work *entry)
+{
+	struct irq_work *next, *nflags;
+
+	do {
+		next = entry->next;
+		if ((unsigned long)next & IRQ_WORK_PENDING)
+			return false;
+		nflags = next_flags(next, IRQ_WORK_FLAGS);
+	} while (cmpxchg(&entry->next, next, nflags) != next);
+
+	return true;
+}
+
+
+void __weak arch_irq_work_raise(void)
+{
+	/*
+	 * Lame architectures will get the timer tick callback
+	 */
+}
+
+/*
+ * Queue the entry and raise the IPI if needed.
+ */
+static void __irq_work_queue(struct irq_work *entry)
+{
+	struct irq_work **head, *next;
+
+	head = &get_cpu_var(irq_work_list);
+
+	do {
+		next = *head;
+		/* Can assign non-atomically because we keep the flags set. */
+		entry->next = next_flags(next, IRQ_WORK_FLAGS);
+	} while (cmpxchg(head, next, entry) != next);
+
+	/* The list was empty, raise self-interrupt to start processing. */
+	if (!irq_work_next(entry))
+		arch_irq_work_raise();
+
+	put_cpu_var(irq_work_list);
+}
+
+/*
+ * Enqueue the irq_work @entry; returns true on success, false when the
+ * @entry was already enqueued by someone else.
+ *
+ * Can be re-enqueued while the callback is still in progress.
+ */
+bool irq_work_queue(struct irq_work *entry)
+{
+	if (!irq_work_claim(entry)) {
+		/*
+		 * Already enqueued, can't do!
+		 */
+		return false;
+	}
+
+	__irq_work_queue(entry);
+	return true;
+}
+EXPORT_SYMBOL_GPL(irq_work_queue);
+
+/*
+ * Run the irq_work entries on this cpu. Must be run from hardirq
+ * context with local IRQs disabled.
+ */
+void irq_work_run(void)
+{
+	struct irq_work *list, **head;
+
+	head = &__get_cpu_var(irq_work_list);
+	if (*head == NULL)
+		return;
+
+	BUG_ON(!in_irq());
+	BUG_ON(!irqs_disabled());
+
+	list = xchg(head, NULL);
+	while (list != NULL) {
+		struct irq_work *entry = list;
+
+		list = irq_work_next(list);
+
+		/*
+		 * Clear the PENDING bit, after this point the @entry
+		 * can be re-used.
+		 */
+		entry->next = next_flags(NULL, IRQ_WORK_BUSY);
+		entry->func(entry);
+		/*
+		 * Clear the BUSY bit and return to the free state if
+		 * no-one else claimed it meanwhile.
+		 */
+		cmpxchg(&entry->next, next_flags(NULL, IRQ_WORK_BUSY), NULL);
+	}
+}
+EXPORT_SYMBOL_GPL(irq_work_run);
+
+/*
+ * Synchronize against the irq_work @entry; ensures the entry is not
+ * currently in use.
+ */
+void irq_work_sync(struct irq_work *entry)
+{
+	WARN_ON_ONCE(irqs_disabled());
+
+	while (irq_work_is_set(entry, IRQ_WORK_BUSY))
+		cpu_relax();
+}
+EXPORT_SYMBOL_GPL(irq_work_sync);
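Because struct irq_work objects are pointer-aligned, the two low bits of ->next are free to carry the PENDING and BUSY flags; next_flags() packs them in and irq_work_next() masks them off. A worked illustration of the encoding (these helpers are static to the file; shown here only to make the bit arithmetic concrete):

	struct irq_work *tagged = next_flags(entry, IRQ_WORK_FLAGS);
	/* tagged == (struct irq_work *)((unsigned long)entry | 3) */
	struct irq_work *plain = irq_work_next(tagged);
	/* plain == entry; the PENDING and BUSY bits are stripped */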
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 1ec3916ffef0..05ecf6f7c672 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -34,7 +34,7 @@
 
 #include <asm/irq_regs.h>
 
-static atomic_t nr_events __read_mostly;
+atomic_t perf_task_events __read_mostly;
 static atomic_t nr_mmap_events __read_mostly;
 static atomic_t nr_comm_events __read_mostly;
 static atomic_t nr_task_events __read_mostly;
@@ -315,7 +315,12 @@ static void perf_group_attach(struct perf_event *event)
 {
 	struct perf_event *group_leader = event->group_leader;
 
-	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_GROUP);
+	/*
+	 * We can have double attach due to group movement in perf_event_open.
+	 */
+	if (event->attach_state & PERF_ATTACH_GROUP)
+		return;
+
 	event->attach_state |= PERF_ATTACH_GROUP;
 
 	if (group_leader == event)
@@ -412,8 +417,8 @@ event_filter_match(struct perf_event *event)
 	return event->cpu == -1 || event->cpu == smp_processor_id();
 }
 
-static void
-event_sched_out(struct perf_event *event,
+static int
+__event_sched_out(struct perf_event *event,
 		  struct perf_cpu_context *cpuctx,
 		  struct perf_event_context *ctx)
 {
@@ -432,14 +437,13 @@ event_sched_out(struct perf_event *event,
 	}
 
 	if (event->state != PERF_EVENT_STATE_ACTIVE)
-		return;
+		return 0;
 
 	event->state = PERF_EVENT_STATE_INACTIVE;
 	if (event->pending_disable) {
 		event->pending_disable = 0;
 		event->state = PERF_EVENT_STATE_OFF;
 	}
-	event->tstamp_stopped = ctx->time;
 	event->pmu->del(event, 0);
 	event->oncpu = -1;
 
@@ -448,6 +452,19 @@ event_sched_out(struct perf_event *event,
 	ctx->nr_active--;
 	if (event->attr.exclusive || !cpuctx->active_oncpu)
 		cpuctx->exclusive = 0;
+	return 1;
+}
+
+static void
+event_sched_out(struct perf_event *event,
+		struct perf_cpu_context *cpuctx,
+		struct perf_event_context *ctx)
+{
+	int ret;
+
+	ret = __event_sched_out(event, cpuctx, ctx);
+	if (ret)
+		event->tstamp_stopped = ctx->time;
 }
 
 static void
@@ -647,7 +664,7 @@ retry:
 }
 
 static int
-event_sched_in(struct perf_event *event,
+__event_sched_in(struct perf_event *event,
 		 struct perf_cpu_context *cpuctx,
 		 struct perf_event_context *ctx)
 {
@@ -667,8 +684,6 @@ event_sched_in(struct perf_event *event,
 		return -EAGAIN;
 	}
 
-	event->tstamp_running += ctx->time - event->tstamp_stopped;
-
 	if (!is_software_event(event))
 		cpuctx->active_oncpu++;
 	ctx->nr_active++;
@@ -679,6 +694,35 @@ event_sched_in(struct perf_event *event,
 	return 0;
 }
 
+static inline int
+event_sched_in(struct perf_event *event,
+	       struct perf_cpu_context *cpuctx,
+	       struct perf_event_context *ctx)
+{
+	int ret = __event_sched_in(event, cpuctx, ctx);
+	if (ret)
+		return ret;
+	event->tstamp_running += ctx->time - event->tstamp_stopped;
+	return 0;
+}
+
+static void
+group_commit_event_sched_in(struct perf_event *group_event,
+			    struct perf_cpu_context *cpuctx,
+			    struct perf_event_context *ctx)
+{
+	struct perf_event *event;
+	u64 now = ctx->time;
+
+	group_event->tstamp_running += now - group_event->tstamp_stopped;
+	/*
+	 * Schedule in siblings as one group (if any):
+	 */
+	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
+		event->tstamp_running += now - event->tstamp_stopped;
+	}
+}
+
 static int
 group_sched_in(struct perf_event *group_event,
 	       struct perf_cpu_context *cpuctx,
@@ -692,7 +736,13 @@ group_sched_in(struct perf_event *group_event,
 
 	pmu->start_txn(pmu);
 
-	if (event_sched_in(group_event, cpuctx, ctx)) {
+	/*
+	 * use __event_sched_in() to delay updating tstamp_running
+	 * until the transaction is committed. In case of failure
+	 * we will keep an unmodified tstamp_running which is a
+	 * requirement to get correct timing information
+	 */
+	if (__event_sched_in(group_event, cpuctx, ctx)) {
 		pmu->cancel_txn(pmu);
 		return -EAGAIN;
 	}
@@ -701,26 +751,31 @@ group_sched_in(struct perf_event *group_event,
701 * Schedule in siblings as one group (if any): 751 * Schedule in siblings as one group (if any):
702 */ 752 */
703 list_for_each_entry(event, &group_event->sibling_list, group_entry) { 753 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
704 if (event_sched_in(event, cpuctx, ctx)) { 754 if (__event_sched_in(event, cpuctx, ctx)) {
705 partial_group = event; 755 partial_group = event;
706 goto group_error; 756 goto group_error;
707 } 757 }
708 } 758 }
709 759
710 if (!pmu->commit_txn(pmu)) 760 if (!pmu->commit_txn(pmu)) {
761 /* commit tstamp_running */
762 group_commit_event_sched_in(group_event, cpuctx, ctx);
711 return 0; 763 return 0;
712 764 }
713group_error: 765group_error:
714 /* 766 /*
715 * Groups can be scheduled in as one unit only, so undo any 767 * Groups can be scheduled in as one unit only, so undo any
716 * partial group before returning: 768 * partial group before returning:
769 *
770 * use __event_sched_out() to avoid updating tstamp_stopped
771 * because the event never actually ran
717 */ 772 */
718 list_for_each_entry(event, &group_event->sibling_list, group_entry) { 773 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
719 if (event == partial_group) 774 if (event == partial_group)
720 break; 775 break;
721 event_sched_out(event, cpuctx, ctx); 776 __event_sched_out(event, cpuctx, ctx);
722 } 777 }
723 event_sched_out(group_event, cpuctx, ctx); 778 __event_sched_out(group_event, cpuctx, ctx);
724 779
725 pmu->cancel_txn(pmu); 780 pmu->cancel_txn(pmu);
726 781
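
For context, the other side of this contract: the PMU driver's transaction callbacks that the start_txn()/commit_txn()/cancel_txn() calls above invoke. The hook signatures follow struct pmu as used in this file; the bodies are placeholders sketching the expected behaviour, not a real driver:

	static void demo_pmu_start_txn(struct pmu *pmu)
	{
		/* enter group mode: individual ->add() calls defer their
		 * schedulability checks to commit_txn() */
	}

	static int demo_pmu_commit_txn(struct pmu *pmu)
	{
		/* verify that every event added since start_txn() fits on
		 * the hardware at once; a nonzero return makes
		 * group_sched_in() unwind via __event_sched_out() */
		return 0;
	}

	static void demo_pmu_cancel_txn(struct pmu *pmu)
	{
		/* drop any state accumulated since start_txn() */
	}
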
@@ -1256,8 +1311,8 @@ void perf_event_context_sched_out(struct task_struct *task, int ctxn,
1256 * accessing the event control register. If an NMI hits, then it will 1311 * accessing the event control register. If an NMI hits, then it will
1257 * not restart the event. 1312 * not restart the event.
1258 */ 1313 */
1259void perf_event_task_sched_out(struct task_struct *task, 1314void __perf_event_task_sched_out(struct task_struct *task,
1260 struct task_struct *next) 1315 struct task_struct *next)
1261{ 1316{
1262 int ctxn; 1317 int ctxn;
1263 1318
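
The new __ prefix pairs with inline wrappers in include/linux/perf_event.h (earlier in this patch) that gate the call on the perf_task_events jump label, so context switches of tasks with no events skip the hook almost for free. Roughly, hedged against the exact macro spelling there:

	extern atomic_t perf_task_events;

	static inline
	void perf_event_task_sched_out(struct task_struct *task,
				       struct task_struct *next)
	{
		/* COND_STMT() (include/linux/jump_label.h) compiles to a
		 * patched-out branch while the key is disabled */
		COND_STMT(&perf_task_events,
			  __perf_event_task_sched_out(task, next));
	}
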
@@ -1285,14 +1340,6 @@ static void task_ctx_sched_out(struct perf_event_context *ctx,
1285/* 1340/*
1286 * Called with IRQs disabled 1341 * Called with IRQs disabled
1287 */ 1342 */
1288static void __perf_event_task_sched_out(struct perf_event_context *ctx)
1289{
1290 task_ctx_sched_out(ctx, EVENT_ALL);
1291}
1292
1293/*
1294 * Called with IRQs disabled
1295 */
1296static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, 1343static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
1297 enum event_type_t event_type) 1344 enum event_type_t event_type)
1298{ 1345{
@@ -1439,7 +1486,7 @@ void perf_event_context_sched_in(struct perf_event_context *ctx)
1440 * accessing the event control register. If an NMI hits, then it will 1487 * accessing the event control register. If an NMI hits, then it will
1440 * keep the event running. 1487 * keep the event running.
1441 */ 1488 */
1442void perf_event_task_sched_in(struct task_struct *task) 1489void __perf_event_task_sched_in(struct task_struct *task)
1443{ 1490{
1444 struct perf_event_context *ctx; 1491 struct perf_event_context *ctx;
1445 int ctxn; 1492 int ctxn;
@@ -1780,7 +1827,13 @@ static u64 perf_event_read(struct perf_event *event)
1780 unsigned long flags; 1827 unsigned long flags;
1781 1828
1782 raw_spin_lock_irqsave(&ctx->lock, flags); 1829 raw_spin_lock_irqsave(&ctx->lock, flags);
1783 update_context_time(ctx); 1830 /*
1831 * may read while context is not active
1832 * (e.g., thread is blocked); in that case
1833 * we cannot update the context time
1834 */
1835 if (ctx->is_active)
1836 update_context_time(ctx);
1784 update_event_times(event); 1837 update_event_times(event);
1785 raw_spin_unlock_irqrestore(&ctx->lock, flags); 1838 raw_spin_unlock_irqrestore(&ctx->lock, flags);
1786 } 1839 }
@@ -2129,11 +2182,9 @@ retry:
2129 } 2182 }
2130 } 2183 }
2131 2184
2132 put_task_struct(task);
2133 return ctx; 2185 return ctx;
2134 2186
2135errout: 2187errout:
2136 put_task_struct(task);
2137 return ERR_PTR(err); 2188 return ERR_PTR(err);
2138} 2189}
2139 2190
@@ -2150,15 +2201,15 @@ static void free_event_rcu(struct rcu_head *head)
2150 kfree(event); 2201 kfree(event);
2151} 2202}
2152 2203
2153static void perf_pending_sync(struct perf_event *event);
2154static void perf_buffer_put(struct perf_buffer *buffer); 2204static void perf_buffer_put(struct perf_buffer *buffer);
2155 2205
2156static void free_event(struct perf_event *event) 2206static void free_event(struct perf_event *event)
2157{ 2207{
2158 perf_pending_sync(event); 2208 irq_work_sync(&event->pending);
2159 2209
2160 if (!event->parent) { 2210 if (!event->parent) {
2161 atomic_dec(&nr_events); 2211 if (event->attach_state & PERF_ATTACH_TASK)
2212 jump_label_dec(&perf_task_events);
2162 if (event->attr.mmap || event->attr.mmap_data) 2213 if (event->attr.mmap || event->attr.mmap_data)
2163 atomic_dec(&nr_mmap_events); 2214 atomic_dec(&nr_mmap_events);
2164 if (event->attr.comm) 2215 if (event->attr.comm)
@@ -3106,16 +3157,7 @@ void perf_event_wakeup(struct perf_event *event)
3106 } 3157 }
3107} 3158}
3108 3159
3109/* 3160static void perf_pending_event(struct irq_work *entry)
3110 * Pending wakeups
3111 *
3112 * Handle the case where we need to wake up from NMI (or rq->lock) context.
3113 *
3114 * The NMI bit means we cannot possibly take locks. Therefore, maintain a
3115 * single linked list and use cmpxchg() to add entries lockless.
3116 */
3117
3118static void perf_pending_event(struct perf_pending_entry *entry)
3119{ 3161{
3120 struct perf_event *event = container_of(entry, 3162 struct perf_event *event = container_of(entry,
3121 struct perf_event, pending); 3163 struct perf_event, pending);
@@ -3131,89 +3173,6 @@ static void perf_pending_event(struct perf_pending_entry *entry)
3131 } 3173 }
3132} 3174}
3133 3175
3134#define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
3135
3136static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
3137 PENDING_TAIL,
3138};
3139
3140static void perf_pending_queue(struct perf_pending_entry *entry,
3141 void (*func)(struct perf_pending_entry *))
3142{
3143 struct perf_pending_entry **head;
3144
3145 if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
3146 return;
3147
3148 entry->func = func;
3149
3150 head = &get_cpu_var(perf_pending_head);
3151
3152 do {
3153 entry->next = *head;
3154 } while (cmpxchg(head, entry->next, entry) != entry->next);
3155
3156 set_perf_event_pending();
3157
3158 put_cpu_var(perf_pending_head);
3159}
3160
3161static int __perf_pending_run(void)
3162{
3163 struct perf_pending_entry *list;
3164 int nr = 0;
3165
3166 list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
3167 while (list != PENDING_TAIL) {
3168 void (*func)(struct perf_pending_entry *);
3169 struct perf_pending_entry *entry = list;
3170
3171 list = list->next;
3172
3173 func = entry->func;
3174 entry->next = NULL;
3175 /*
3176 * Ensure we observe the unqueue before we issue the wakeup,
3177 * so that we won't be waiting forever.
3178 * -- see perf_not_pending().
3179 */
3180 smp_wmb();
3181
3182 func(entry);
3183 nr++;
3184 }
3185
3186 return nr;
3187}
3188
3189static inline int perf_not_pending(struct perf_event *event)
3190{
3191 /*
3192 * If we flush on whatever cpu we run, there is a chance we don't
3193 * need to wait.
3194 */
3195 get_cpu();
3196 __perf_pending_run();
3197 put_cpu();
3198
3199 /*
3200 * Ensure we see the proper queue state before going to sleep
3201 * so that we do not miss the wakeup. -- see perf_pending_handle()
3202 */
3203 smp_rmb();
3204 return event->pending.next == NULL;
3205}
3206
3207static void perf_pending_sync(struct perf_event *event)
3208{
3209 wait_event(event->waitq, perf_not_pending(event));
3210}
3211
3212void perf_event_do_pending(void)
3213{
3214 __perf_pending_run();
3215}
3216
3217/* 3176/*
3218 * We assume there is only KVM supporting the callbacks. 3177 * We assume there is only KVM supporting the callbacks.
3219 * Later on, we might change it to a list if there is 3178 * Later on, we might change it to a list if there is
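
All of the hand-rolled cmpxchg() list machinery removed above is subsumed by the generic irq_work layer added earlier in this series (kernel/irq_work.c). A minimal sketch of the API as this file now uses it; only the demo_* names are invented:

	#include <linux/irq_work.h>

	static void demo_func(struct irq_work *work)
	{
		/* runs in hardirq context shortly after being queued */
	}

	static struct irq_work demo_work;

	static void demo_setup(void)
	{
		init_irq_work(&demo_work, demo_func);
	}

	static void demo_raise(void)
	{
		irq_work_queue(&demo_work);	/* lock-free, NMI-safe */
	}

	static void demo_teardown(void)
	{
		irq_work_sync(&demo_work);	/* wait out a pending callback */
	}

On architectures that cannot raise a self-interrupt for irq_work, queued entries are picked up on the next timer tick; see the kernel/timer.c hunk below.
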
@@ -3263,8 +3222,7 @@ static void perf_output_wakeup(struct perf_output_handle *handle)
3263 3222
3264 if (handle->nmi) { 3223 if (handle->nmi) {
3265 handle->event->pending_wakeup = 1; 3224 handle->event->pending_wakeup = 1;
3266 perf_pending_queue(&handle->event->pending, 3225 irq_work_queue(&handle->event->pending);
3267 perf_pending_event);
3268 } else 3226 } else
3269 perf_event_wakeup(handle->event); 3227 perf_event_wakeup(handle->event);
3270} 3228}
@@ -4300,8 +4258,7 @@ static int __perf_event_overflow(struct perf_event *event, int nmi,
4300 event->pending_kill = POLL_HUP; 4258 event->pending_kill = POLL_HUP;
4301 if (nmi) { 4259 if (nmi) {
4302 event->pending_disable = 1; 4260 event->pending_disable = 1;
4303 perf_pending_queue(&event->pending, 4261 irq_work_queue(&event->pending);
4304 perf_pending_event);
4305 } else 4262 } else
4306 perf_event_disable(event); 4263 perf_event_disable(event);
4307 } 4264 }
@@ -4712,7 +4669,7 @@ static void sw_perf_event_destroy(struct perf_event *event)
4712 4669
4713 WARN_ON(event->parent); 4670 WARN_ON(event->parent);
4714 4671
4715 atomic_dec(&perf_swevent_enabled[event_id]); 4672 jump_label_dec(&perf_swevent_enabled[event_id]);
4716 swevent_hlist_put(event); 4673 swevent_hlist_put(event);
4717} 4674}
4718 4675
@@ -4742,7 +4699,7 @@ static int perf_swevent_init(struct perf_event *event)
4742 if (err) 4699 if (err)
4743 return err; 4700 return err;
4744 4701
4745 atomic_inc(&perf_swevent_enabled[event_id]); 4702 jump_label_inc(&perf_swevent_enabled[event_id]);
4746 event->destroy = sw_perf_event_destroy; 4703 event->destroy = sw_perf_event_destroy;
4747 } 4704 }
4748 4705
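
jump_label_inc()/jump_label_dec() come from the atomic_t-keyed interface added in this series (include/linux/jump_label_ref.h): the 0->1 transition enables the patched branch, the 1->0 transition disables it, and intermediate counts only adjust the refcount. A sketch with an invented key:

	static atomic_t demo_key = ATOMIC_INIT(0);

	static void demo_event_init(void)
	{
		jump_label_inc(&demo_key);	/* 0->1 patches the branch in */
	}

	static void demo_event_destroy(void)
	{
		jump_label_dec(&demo_key);	/* 1->0 patches it back out */
	}
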
@@ -5291,9 +5248,10 @@ unlock:
5291 */ 5248 */
5292static struct perf_event * 5249static struct perf_event *
5293perf_event_alloc(struct perf_event_attr *attr, int cpu, 5250perf_event_alloc(struct perf_event_attr *attr, int cpu,
5294 struct perf_event *group_leader, 5251 struct task_struct *task,
5295 struct perf_event *parent_event, 5252 struct perf_event *group_leader,
5296 perf_overflow_handler_t overflow_handler) 5253 struct perf_event *parent_event,
5254 perf_overflow_handler_t overflow_handler)
5297{ 5255{
5298 struct pmu *pmu; 5256 struct pmu *pmu;
5299 struct perf_event *event; 5257 struct perf_event *event;
@@ -5318,6 +5276,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
5318 INIT_LIST_HEAD(&event->event_entry); 5276 INIT_LIST_HEAD(&event->event_entry);
5319 INIT_LIST_HEAD(&event->sibling_list); 5277 INIT_LIST_HEAD(&event->sibling_list);
5320 init_waitqueue_head(&event->waitq); 5278 init_waitqueue_head(&event->waitq);
5279 init_irq_work(&event->pending, perf_pending_event);
5321 5280
5322 mutex_init(&event->mmap_mutex); 5281 mutex_init(&event->mmap_mutex);
5323 5282
@@ -5334,6 +5293,17 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
5334 5293
5335 event->state = PERF_EVENT_STATE_INACTIVE; 5294 event->state = PERF_EVENT_STATE_INACTIVE;
5336 5295
5296 if (task) {
5297 event->attach_state = PERF_ATTACH_TASK;
5298#ifdef CONFIG_HAVE_HW_BREAKPOINT
5299 /*
5300 * hw_breakpoint is a bit difficult here..
5301 */
5302 if (attr->type == PERF_TYPE_BREAKPOINT)
5303 event->hw.bp_target = task;
5304#endif
5305 }
5306
5337 if (!overflow_handler && parent_event) 5307 if (!overflow_handler && parent_event)
5338 overflow_handler = parent_event->overflow_handler; 5308 overflow_handler = parent_event->overflow_handler;
5339 5309
@@ -5377,7 +5347,8 @@ done:
5377 event->pmu = pmu; 5347 event->pmu = pmu;
5378 5348
5379 if (!event->parent) { 5349 if (!event->parent) {
5380 atomic_inc(&nr_events); 5350 if (event->attach_state & PERF_ATTACH_TASK)
5351 jump_label_inc(&perf_task_events);
5381 if (event->attr.mmap || event->attr.mmap_data) 5352 if (event->attr.mmap || event->attr.mmap_data)
5382 atomic_inc(&nr_mmap_events); 5353 atomic_inc(&nr_mmap_events);
5383 if (event->attr.comm) 5354 if (event->attr.comm)
@@ -5586,10 +5557,18 @@ SYSCALL_DEFINE5(perf_event_open,
5586 group_leader = NULL; 5557 group_leader = NULL;
5587 } 5558 }
5588 5559
5589 event = perf_event_alloc(&attr, cpu, group_leader, NULL, NULL); 5560 if (pid != -1) {
5561 task = find_lively_task_by_vpid(pid);
5562 if (IS_ERR(task)) {
5563 err = PTR_ERR(task);
5564 goto err_group_fd;
5565 }
5566 }
5567
5568 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, NULL);
5590 if (IS_ERR(event)) { 5569 if (IS_ERR(event)) {
5591 err = PTR_ERR(event); 5570 err = PTR_ERR(event);
5592 goto err_fd; 5571 goto err_task;
5593 } 5572 }
5594 5573
5595 /* 5574 /*
@@ -5621,21 +5600,13 @@ SYSCALL_DEFINE5(perf_event_open,
5621 } 5600 }
5622 } 5601 }
5623 5602
5624 if (pid != -1) {
5625 task = find_lively_task_by_vpid(pid);
5626 if (IS_ERR(task)) {
5627 err = PTR_ERR(task);
5628 goto err_group_fd;
5629 }
5630 }
5631
5632 /* 5603 /*
5633 * Get the target context (task or percpu): 5604 * Get the target context (task or percpu):
5634 */ 5605 */
5635 ctx = find_get_context(pmu, task, cpu); 5606 ctx = find_get_context(pmu, task, cpu);
5636 if (IS_ERR(ctx)) { 5607 if (IS_ERR(ctx)) {
5637 err = PTR_ERR(ctx); 5608 err = PTR_ERR(ctx);
5638 goto err_group_fd; 5609 goto err_alloc;
5639 } 5610 }
5640 5611
5641 /* 5612 /*
@@ -5731,9 +5702,13 @@ SYSCALL_DEFINE5(perf_event_open,
5731 5702
5732err_context: 5703err_context:
5733 put_ctx(ctx); 5704 put_ctx(ctx);
5705err_alloc:
5706 free_event(event);
5707err_task:
5708 if (task)
5709 put_task_struct(task);
5734err_group_fd: 5710err_group_fd:
5735 fput_light(group_file, fput_needed); 5711 fput_light(group_file, fput_needed);
5736 free_event(event);
5737err_fd: 5712err_fd:
5738 put_unused_fd(event_fd); 5713 put_unused_fd(event_fd);
5739 return err; 5714 return err;
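
Two things change shape in perf_event_open(): the task lookup moves ahead of perf_event_alloc() so the allocator can record the target task (and find_get_context() no longer drops the task reference, per the earlier hunk), and the unwind labels now release resources strictly in reverse order of acquisition. Condensed from the hunks above:

	err_context:
		put_ctx(ctx);			/* undo find_get_context() */
	err_alloc:
		free_event(event);		/* undo perf_event_alloc() */
	err_task:
		if (task)
			put_task_struct(task);	/* undo find_lively_task_by_vpid() */
	err_group_fd:
		fput_light(group_file, fput_needed);
	err_fd:
		put_unused_fd(event_fd);
		return err;
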
@@ -5759,7 +5734,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
5759 * Get the target context (task or percpu): 5734 * Get the target context (task or percpu):
5760 */ 5735 */
5761 5736
5762 event = perf_event_alloc(attr, cpu, NULL, NULL, overflow_handler); 5737 event = perf_event_alloc(attr, cpu, task, NULL, NULL, overflow_handler);
5763 if (IS_ERR(event)) { 5738 if (IS_ERR(event)) {
5764 err = PTR_ERR(event); 5739 err = PTR_ERR(event);
5765 goto err; 5740 goto err;
@@ -5868,7 +5843,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
5868 * our context. 5843 * our context.
5869 */ 5844 */
5870 child_ctx = child->perf_event_ctxp[ctxn]; 5845 child_ctx = child->perf_event_ctxp[ctxn];
5871 __perf_event_task_sched_out(child_ctx); 5846 task_ctx_sched_out(child_ctx, EVENT_ALL);
5872 5847
5873 /* 5848 /*
5874 * Take the context lock here so that if find_get_context is 5849 * Take the context lock here so that if find_get_context is
@@ -6027,6 +6002,7 @@ inherit_event(struct perf_event *parent_event,
6027 6002
6028 child_event = perf_event_alloc(&parent_event->attr, 6003 child_event = perf_event_alloc(&parent_event->attr,
6029 parent_event->cpu, 6004 parent_event->cpu,
6005 child,
6030 group_leader, parent_event, 6006 group_leader, parent_event,
6031 NULL); 6007 NULL);
6032 if (IS_ERR(child_event)) 6008 if (IS_ERR(child_event))
diff --git a/kernel/timer.c b/kernel/timer.c
index 97bf05baade7..68a9ae7679b7 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -37,7 +37,7 @@
37#include <linux/delay.h> 37#include <linux/delay.h>
38#include <linux/tick.h> 38#include <linux/tick.h>
39#include <linux/kallsyms.h> 39#include <linux/kallsyms.h>
40#include <linux/perf_event.h> 40#include <linux/irq_work.h>
41#include <linux/sched.h> 41#include <linux/sched.h>
42#include <linux/slab.h> 42#include <linux/slab.h>
43 43
@@ -1279,7 +1279,10 @@ void update_process_times(int user_tick)
1279 run_local_timers(); 1279 run_local_timers();
1280 rcu_check_callbacks(cpu, user_tick); 1280 rcu_check_callbacks(cpu, user_tick);
1281 printk_tick(); 1281 printk_tick();
1282 perf_event_do_pending(); 1282#ifdef CONFIG_IRQ_WORK
1283 if (in_irq())
1284 irq_work_run();
1285#endif
1283 scheduler_tick(); 1286 scheduler_tick();
1284 run_posix_cpu_timers(p); 1287 run_posix_cpu_timers(p);
1285} 1288}
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index d6073a50a6ca..e95ee7f31d43 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -265,10 +265,10 @@ static void set_tracepoint(struct tracepoint_entry **entry,
265 */ 265 */
266 rcu_assign_pointer(elem->funcs, (*entry)->funcs); 266 rcu_assign_pointer(elem->funcs, (*entry)->funcs);
267 if (!elem->state && active) { 267 if (!elem->state && active) {
268 enable_jump_label(&elem->state); 268 jump_label_enable(&elem->state);
269 elem->state = active; 269 elem->state = active;
270 } else if (elem->state && !active) { 270 } else if (elem->state && !active) {
271 disable_jump_label(&elem->state); 271 jump_label_disable(&elem->state);
272 elem->state = active; 272 elem->state = active;
273 } 273 }
274} 274}
@@ -285,7 +285,7 @@ static void disable_tracepoint(struct tracepoint *elem)
285 elem->unregfunc(); 285 elem->unregfunc();
286 286
287 if (elem->state) { 287 if (elem->state) {
288 disable_jump_label(&elem->state); 288 jump_label_disable(&elem->state);
289 elem->state = 0; 289 elem->state = 0;
290 } 290 }
291 rcu_assign_pointer(elem->funcs, NULL); 291 rcu_assign_pointer(elem->funcs, NULL);
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index e925c7b960f1..7bd6df781ce5 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -142,9 +142,9 @@ static void ddebug_change(const struct ddebug_query *query,
142 dt->num_enabled++; 142 dt->num_enabled++;
143 dp->flags = newflags; 143 dp->flags = newflags;
144 if (newflags) { 144 if (newflags) {
145 enable_jump_label(&dp->enabled); 145 jump_label_enable(&dp->enabled);
146 } else { 146 } else {
147 disable_jump_label(&dp->enabled); 147 jump_label_disable(&dp->enabled);
148 } 148 }
149 if (verbose) 149 if (verbose)
150 printk(KERN_INFO 150 printk(KERN_INFO
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 4db60b2e2a76..843bd4f4ffc9 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -210,7 +210,13 @@ endif
210 210
211ifdef CONFIG_FTRACE_MCOUNT_RECORD 211ifdef CONFIG_FTRACE_MCOUNT_RECORD
212ifdef BUILD_C_RECORDMCOUNT 212ifdef BUILD_C_RECORDMCOUNT
213cmd_record_mcount = $(objtree)/scripts/recordmcount "$(@)"; 213# Due to recursion, we must skip empty.o.
214# The empty.o file is created in the make process in order to determine
215# the target endianness and word size. It is made before all other C
216# files, including recordmcount.
217cmd_record_mcount = if [ $(@) != "scripts/mod/empty.o" ]; then \
218 $(objtree)/scripts/recordmcount "$(@)"; \
219 fi;
214else 220else
215cmd_record_mcount = set -e ; perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \ 221cmd_record_mcount = set -e ; perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \
216 "$(if $(CONFIG_CPU_BIG_ENDIAN),big,little)" \ 222 "$(if $(CONFIG_CPU_BIG_ENDIAN),big,little)" \