author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2011-06-27 08:41:57 -0400
committer  Ingo Molnar <mingo@elte.hu>                2011-07-01 05:06:35 -0400
commit     a8b0ca17b80e92faab46ee7179ba9e99ccb61233 (patch)
tree       a4a6282139f26458f80dcbe21c709a9290e84143
parent     1880c4ae182afb5650c5678949ecfe7ff66a724e (diff)
perf: Remove the nmi parameter from the swevent and overflow interface
The nmi parameter indicated if we could do wakeups from the current
context, if not, we would set some state and self-IPI and let the
resulting interrupt do the wakeup.

For the various event classes:

  - hardware: nmi=0; PMI is in fact an NMI or we run irq_work_run from
    the PMI-tail (ARM etc.)
  - tracepoint: nmi=0; since tracepoint could be from NMI context.
  - software: nmi=[0,1]; some, like the schedule thing cannot
    perform wakeups, and hence need 0.

As one can see, there is very little nmi=1 usage, and the down-side of
not using it is that on some platforms some software events can have a
jiffy delay in wakeup (when arch_irq_work_raise isn't implemented).

The up-side however is that we can remove the nmi parameter and save a
bunch of conditionals in fast paths.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Michael Cree <mcree@orcon.net.nz>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: Anton Blanchard <anton@samba.org>
Cc: Eric B Munson <emunson@mgebm.net>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jason Wessel <jason.wessel@windriver.com>
Cc: Don Zickus <dzickus@redhat.com>
Link: http://lkml.kernel.org/n/tip-agjev8eu666tvknpb3iaj0fg@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
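In short: every caller drops one argument, and the overflow slow path defers
wakeups through irq_work unconditionally. A minimal sketch of the
transformation (simplified from the patch; pending_wakeup_sketch is a made-up
illustrative name, everything else mirrors the interfaces touched below):

#include <linux/perf_event.h>
#include <linux/irq_work.h>

/*
 * Before:
 *         perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 *         if (perf_event_overflow(event, nmi, &data, regs))
 *                 x86_pmu_stop(event, 0);
 *
 * After:
 *         perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 *         if (perf_event_overflow(event, &data, regs))
 *                 x86_pmu_stop(event, 0);
 */
static void pending_wakeup_sketch(struct perf_event *event)
{
        /*
         * The old "if (nmi) { queue irq_work } else perf_event_wakeup()"
         * branches collapse into an unconditional deferral, which is safe
         * from any context.  Where arch_irq_work_raise() is not implemented
         * the queued work only runs from the next timer tick - the jiffy
         * delay mentioned above.
         */
        event->pending_wakeup = 1;
        irq_work_queue(&event->pending);
}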
-rw-r--r--  arch/alpha/kernel/perf_event.c | 2
-rw-r--r--  arch/arm/kernel/perf_event_v6.c | 2
-rw-r--r--  arch/arm/kernel/perf_event_v7.c | 2
-rw-r--r--  arch/arm/kernel/perf_event_xscale.c | 4
-rw-r--r--  arch/arm/kernel/ptrace.c | 2
-rw-r--r--  arch/arm/kernel/swp_emulate.c | 2
-rw-r--r--  arch/arm/mm/fault.c | 6
-rw-r--r--  arch/mips/kernel/perf_event.c | 2
-rw-r--r--  arch/mips/kernel/traps.c | 8
-rw-r--r--  arch/mips/kernel/unaligned.c | 5
-rw-r--r--  arch/mips/math-emu/cp1emu.c | 3
-rw-r--r--  arch/mips/mm/fault.c | 8
-rw-r--r--  arch/powerpc/include/asm/emulated_ops.h | 4
-rw-r--r--  arch/powerpc/kernel/perf_event.c | 6
-rw-r--r--  arch/powerpc/kernel/perf_event_fsl_emb.c | 6
-rw-r--r--  arch/powerpc/kernel/ptrace.c | 2
-rw-r--r--  arch/powerpc/mm/fault.c | 6
-rw-r--r--  arch/s390/mm/fault.c | 6
-rw-r--r--  arch/sh/kernel/ptrace_32.c | 2
-rw-r--r--  arch/sh/kernel/traps_32.c | 2
-rw-r--r--  arch/sh/kernel/traps_64.c | 8
-rw-r--r--  arch/sh/math-emu/math.c | 2
-rw-r--r--  arch/sh/mm/fault_32.c | 6
-rw-r--r--  arch/sh/mm/tlbflush_64.c | 6
-rw-r--r--  arch/sparc/kernel/perf_event.c | 2
-rw-r--r--  arch/sparc/kernel/unaligned_32.c | 4
-rw-r--r--  arch/sparc/kernel/unaligned_64.c | 12
-rw-r--r--  arch/sparc/kernel/visemul.c | 2
-rw-r--r--  arch/sparc/math-emu/math_32.c | 2
-rw-r--r--  arch/sparc/math-emu/math_64.c | 2
-rw-r--r--  arch/sparc/mm/fault_32.c | 8
-rw-r--r--  arch/sparc/mm/fault_64.c | 8
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c | 2
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c | 2
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_ds.c | 4
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p4.c | 2
-rw-r--r--  arch/x86/kernel/kgdb.c | 2
-rw-r--r--  arch/x86/kernel/ptrace.c | 2
-rw-r--r--  arch/x86/mm/fault.c | 6
-rw-r--r--  include/linux/perf_event.h | 18
-rw-r--r--  kernel/events/core.c | 63
-rw-r--r--  kernel/events/internal.h | 1
-rw-r--r--  kernel/events/ring_buffer.c | 10
-rw-r--r--  kernel/sched.c | 2
-rw-r--r--  kernel/watchdog.c | 2
-rw-r--r--  samples/hw_breakpoint/data_breakpoint.c | 2
46 files changed, 119 insertions(+), 141 deletions(-)
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index 90561c45e7d8..8e47709160f8 100644
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -847,7 +847,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
                 data.period = event->hw.last_period;
 
                 if (alpha_perf_event_set_period(event, hwc, idx)) {
-                        if (perf_event_overflow(event, 1, &data, regs)) {
+                        if (perf_event_overflow(event, &data, regs)) {
                                 /* Interrupts coming too quickly; "throttle" the
                                  * counter, i.e., disable it for a little while.
                                  */
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index f1e8dd94afe8..38dc4da1d612 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -479,7 +479,7 @@ armv6pmu_handle_irq(int irq_num,
                 if (!armpmu_event_set_period(event, hwc, idx))
                         continue;
 
-                if (perf_event_overflow(event, 0, &data, regs))
+                if (perf_event_overflow(event, &data, regs))
                         armpmu->disable(hwc, idx);
         }
 
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 4960686afb58..6e5f8752303b 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -787,7 +787,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
                 if (!armpmu_event_set_period(event, hwc, idx))
                         continue;
 
-                if (perf_event_overflow(event, 0, &data, regs))
+                if (perf_event_overflow(event, &data, regs))
                         armpmu->disable(hwc, idx);
         }
 
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 39affbe4fdb2..99b6b85c7e49 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -251,7 +251,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
                 if (!armpmu_event_set_period(event, hwc, idx))
                         continue;
 
-                if (perf_event_overflow(event, 0, &data, regs))
+                if (perf_event_overflow(event, &data, regs))
                         armpmu->disable(hwc, idx);
         }
 
@@ -583,7 +583,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
                 if (!armpmu_event_set_period(event, hwc, idx))
                         continue;
 
-                if (perf_event_overflow(event, 0, &data, regs))
+                if (perf_event_overflow(event, &data, regs))
                         armpmu->disable(hwc, idx);
         }
 
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 97260060bf26..0c9b1054f790 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -396,7 +396,7 @@ static long ptrace_hbp_idx_to_num(int idx)
 /*
  * Handle hitting a HW-breakpoint.
  */
-static void ptrace_hbptriggered(struct perf_event *bp, int unused,
+static void ptrace_hbptriggered(struct perf_event *bp,
                                 struct perf_sample_data *data,
                                 struct pt_regs *regs)
 {
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 40ee7e5045e4..5f452f8fde05 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -183,7 +183,7 @@ static int swp_handler(struct pt_regs *regs, unsigned int instr)
         unsigned int address, destreg, data, type;
         unsigned int res = 0;
 
-        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, regs->ARM_pc);
+        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->ARM_pc);
 
         if (current->pid != previous_pid) {
                 pr_debug("\"%s\" (%ld) uses deprecated SWP{B} instruction\n",
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index bc0e1d88fd3b..9ea4f7ddd665 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -318,11 +318,11 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
         fault = __do_page_fault(mm, addr, fsr, tsk);
         up_read(&mm->mmap_sem);
 
-        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, addr);
+        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
         if (fault & VM_FAULT_MAJOR)
-                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, regs, addr);
+                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, addr);
         else if (fault & VM_FAULT_MINOR)
-                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, regs, addr);
+                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, addr);
 
         /*
          * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c
index a8244854d3dc..d0deaab9ace2 100644
--- a/arch/mips/kernel/perf_event.c
+++ b/arch/mips/kernel/perf_event.c
@@ -527,7 +527,7 @@ handle_associated_event(struct cpu_hw_events *cpuc,
         if (!mipspmu_event_set_period(event, hwc, idx))
                 return;
 
-        if (perf_event_overflow(event, 0, data, regs))
+        if (perf_event_overflow(event, data, regs))
                 mipspmu->disable_event(idx);
 }
 
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index e9b3af27d844..b7517e3abc85 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -578,12 +578,12 @@ static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
 {
         if ((opcode & OPCODE) == LL) {
                 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
-                                1, 0, regs, 0);
+                                1, regs, 0);
                 return simulate_ll(regs, opcode);
         }
         if ((opcode & OPCODE) == SC) {
                 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
-                                1, 0, regs, 0);
+                                1, regs, 0);
                 return simulate_sc(regs, opcode);
         }
 
@@ -602,7 +602,7 @@ static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
         int rd = (opcode & RD) >> 11;
         int rt = (opcode & RT) >> 16;
         perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
-                        1, 0, regs, 0);
+                        1, regs, 0);
         switch (rd) {
         case 0:         /* CPU number */
                 regs->regs[rt] = smp_processor_id();
@@ -640,7 +640,7 @@ static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
 {
         if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
                 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
-                                1, 0, regs, 0);
+                                1, regs, 0);
                 return 0;
         }
 
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index cfea1adfa153..eb319b580353 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -111,8 +111,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
         unsigned long value;
         unsigned int res;
 
-        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
-                        1, 0, regs, 0);
+        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
 
         /*
          * This load never faults.
@@ -517,7 +516,7 @@ asmlinkage void do_ade(struct pt_regs *regs)
         mm_segment_t seg;
 
         perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
-                        1, 0, regs, regs->cp0_badvaddr);
+                        1, regs, regs->cp0_badvaddr);
         /*
          * Did we catch a fault trying to load an instruction?
          * Or are we running in MIPS16 mode?
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index d32cb0503110..dbf2f93a5091 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -272,8 +272,7 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
         }
 
       emul:
-        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
-                        1, 0, xcp, 0);
+        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, xcp, 0);
         MIPS_FPU_EMU_INC_STATS(emulated);
         switch (MIPSInst_OPCODE(ir)) {
         case ldc1_op:{
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 137ee76a0045..937cf3368164 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -145,7 +145,7 @@ good_area:
          * the fault.
          */
         fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
-        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
         if (unlikely(fault & VM_FAULT_ERROR)) {
                 if (fault & VM_FAULT_OOM)
                         goto out_of_memory;
@@ -154,12 +154,10 @@ good_area:
                 BUG();
         }
         if (fault & VM_FAULT_MAJOR) {
-                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
-                                1, 0, regs, address);
+                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
                 tsk->maj_flt++;
         } else {
-                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
-                                1, 0, regs, address);
+                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
                 tsk->min_flt++;
         }
 
diff --git a/arch/powerpc/include/asm/emulated_ops.h b/arch/powerpc/include/asm/emulated_ops.h
index 45921672b97a..2cc41c715d2b 100644
--- a/arch/powerpc/include/asm/emulated_ops.h
+++ b/arch/powerpc/include/asm/emulated_ops.h
@@ -78,14 +78,14 @@ extern void ppc_warn_emulated_print(const char *type);
 #define PPC_WARN_EMULATED(type, regs)                                  \
         do {                                                           \
                 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,          \
-                        1, 0, regs, 0);                                \
+                        1, regs, 0);                                   \
                 __PPC_WARN_EMULATED(type);                             \
         } while (0)
 
 #define PPC_WARN_ALIGNMENT(type, regs)                                 \
         do {                                                           \
                 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,          \
-                        1, 0, regs, regs->dar);                        \
+                        1, regs, regs->dar);                           \
                 __PPC_WARN_EMULATED(type);                             \
         } while (0)
 
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 822f63008ae1..14967de98876 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -1207,7 +1207,7 @@ struct pmu power_pmu = {
  * here so there is no possibility of being interrupted.
  */
 static void record_and_restart(struct perf_event *event, unsigned long val,
-                               struct pt_regs *regs, int nmi)
+                               struct pt_regs *regs)
 {
         u64 period = event->hw.sample_period;
         s64 prev, delta, left;
@@ -1258,7 +1258,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
                 if (event->attr.sample_type & PERF_SAMPLE_ADDR)
                         perf_get_data_addr(regs, &data.addr);
 
-                if (perf_event_overflow(event, nmi, &data, regs))
+                if (perf_event_overflow(event, &data, regs))
                         power_pmu_stop(event, 0);
         }
 }
@@ -1346,7 +1346,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
                 if ((int)val < 0) {
                         /* event has overflowed */
                         found = 1;
-                        record_and_restart(event, val, regs, nmi);
+                        record_and_restart(event, val, regs);
                 }
         }
 
diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c
index b0dc8f7069cd..0a6d2a9d569c 100644
--- a/arch/powerpc/kernel/perf_event_fsl_emb.c
+++ b/arch/powerpc/kernel/perf_event_fsl_emb.c
@@ -568,7 +568,7 @@ static struct pmu fsl_emb_pmu = {
  * here so there is no possibility of being interrupted.
  */
 static void record_and_restart(struct perf_event *event, unsigned long val,
-                               struct pt_regs *regs, int nmi)
+                               struct pt_regs *regs)
 {
         u64 period = event->hw.sample_period;
         s64 prev, delta, left;
@@ -616,7 +616,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
                 perf_sample_data_init(&data, 0);
                 data.period = event->hw.last_period;
 
-                if (perf_event_overflow(event, nmi, &data, regs))
+                if (perf_event_overflow(event, &data, regs))
                         fsl_emb_pmu_stop(event, 0);
         }
 }
@@ -644,7 +644,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
                 if (event) {
                         /* event has overflowed */
                         found = 1;
-                        record_and_restart(event, val, regs, nmi);
+                        record_and_restart(event, val, regs);
                 } else {
                         /*
                          * Disabled counter is negative,
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index cb22024f2b42..3177617af2ef 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -882,7 +882,7 @@ void user_disable_single_step(struct task_struct *task)
 }
 
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
-void ptrace_triggered(struct perf_event *bp, int nmi,
+void ptrace_triggered(struct perf_event *bp,
                       struct perf_sample_data *data, struct pt_regs *regs)
 {
         struct perf_event_attr attr;
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 54f4fb994e99..dbc48254c6cc 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -173,7 +173,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
                 die("Weird page fault", regs, SIGSEGV);
         }
 
-        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
         /* When running in the kernel we expect faults to occur only to
          * addresses in user space.  All other faults represent errors in the
@@ -319,7 +319,7 @@ good_area:
         }
         if (ret & VM_FAULT_MAJOR) {
                 current->maj_flt++;
-                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
                               regs, address);
 #ifdef CONFIG_PPC_SMLPAR
                 if (firmware_has_feature(FW_FEATURE_CMO)) {
@@ -330,7 +330,7 @@ good_area:
 #endif
         } else {
                 current->min_flt++;
-                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
                               regs, address);
         }
         up_read(&mm->mmap_sem);
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index fe103e891e7a..095f782a5512 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -299,7 +299,7 @@ static inline int do_exception(struct pt_regs *regs, int access,
                 goto out;
 
         address = trans_exc_code & __FAIL_ADDR_MASK;
-        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
         flags = FAULT_FLAG_ALLOW_RETRY;
         if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
                 flags |= FAULT_FLAG_WRITE;
@@ -345,11 +345,11 @@ retry:
         if (flags & FAULT_FLAG_ALLOW_RETRY) {
                 if (fault & VM_FAULT_MAJOR) {
                         tsk->maj_flt++;
-                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
                                       regs, address);
                 } else {
                         tsk->min_flt++;
-                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
                                       regs, address);
                 }
                 if (fault & VM_FAULT_RETRY) {
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c
index 3d7b209b2178..8051976100a6 100644
--- a/arch/sh/kernel/ptrace_32.c
+++ b/arch/sh/kernel/ptrace_32.c
@@ -63,7 +63,7 @@ static inline int put_stack_long(struct task_struct *task, int offset,
         return 0;
 }
 
-void ptrace_triggered(struct perf_event *bp, int nmi,
+void ptrace_triggered(struct perf_event *bp,
                       struct perf_sample_data *data, struct pt_regs *regs)
 {
         struct perf_event_attr attr;
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index b51a17104b5f..d9006f8ffc14 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -393,7 +393,7 @@ int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
          */
         if (!expected) {
                 unaligned_fixups_notify(current, instruction, regs);
-                perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0,
+                perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1,
                               regs, address);
         }
 
diff --git a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c
index 6713ca97e553..67110be83fd7 100644
--- a/arch/sh/kernel/traps_64.c
+++ b/arch/sh/kernel/traps_64.c
@@ -434,7 +434,7 @@ static int misaligned_load(struct pt_regs *regs,
                 return error;
         }
 
-        perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, address);
+        perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address);
 
         destreg = (opcode >> 4) & 0x3f;
         if (user_mode(regs)) {
@@ -512,7 +512,7 @@ static int misaligned_store(struct pt_regs *regs,
                 return error;
         }
 
-        perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, address);
+        perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address);
 
         srcreg = (opcode >> 4) & 0x3f;
         if (user_mode(regs)) {
@@ -588,7 +588,7 @@ static int misaligned_fpu_load(struct pt_regs *regs,
                 return error;
         }
 
-        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, address);
+        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, address);
 
         destreg = (opcode >> 4) & 0x3f;
         if (user_mode(regs)) {
@@ -665,7 +665,7 @@ static int misaligned_fpu_store(struct pt_regs *regs,
                 return error;
         }
 
-        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, address);
+        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, address);
 
         srcreg = (opcode >> 4) & 0x3f;
         if (user_mode(regs)) {
diff --git a/arch/sh/math-emu/math.c b/arch/sh/math-emu/math.c
index f76a5090d5d1..977195210653 100644
--- a/arch/sh/math-emu/math.c
+++ b/arch/sh/math-emu/math.c
@@ -620,7 +620,7 @@ int do_fpu_inst(unsigned short inst, struct pt_regs *regs)
         struct task_struct *tsk = current;
         struct sh_fpu_soft_struct *fpu = &(tsk->thread.xstate->softfpu);
 
-        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
+        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
 
         if (!(task_thread_info(tsk)->status & TS_USEDFPU)) {
                 /* initialize once. */
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index d4c34d757f0d..7bebd044f2a1 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -160,7 +160,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
         if ((regs->sr & SR_IMASK) != SR_IMASK)
                 local_irq_enable();
 
-        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
         /*
          * If we're in an interrupt, have no user context or are running
@@ -210,11 +210,11 @@ good_area:
         }
         if (fault & VM_FAULT_MAJOR) {
                 tsk->maj_flt++;
-                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
                               regs, address);
         } else {
                 tsk->min_flt++;
-                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
                               regs, address);
         }
 
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index 7f5810f5dfdc..e3430e093d43 100644
--- a/arch/sh/mm/tlbflush_64.c
+++ b/arch/sh/mm/tlbflush_64.c
@@ -116,7 +116,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
         /* Not an IO address, so reenable interrupts */
         local_irq_enable();
 
-        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
         /*
          * If we're in an interrupt or have no user
@@ -200,11 +200,11 @@ good_area:
 
         if (fault & VM_FAULT_MAJOR) {
                 tsk->maj_flt++;
-                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
                               regs, address);
         } else {
                 tsk->min_flt++;
-                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
                               regs, address);
         }
 
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 2cb0e1c001e2..0b32f2e9e08d 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1277,7 +1277,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
                 if (!sparc_perf_event_set_period(event, hwc, idx))
                         continue;
 
-                if (perf_event_overflow(event, 1, &data, regs))
+                if (perf_event_overflow(event, &data, regs))
                         sparc_pmu_stop(event, 0);
         }
 
diff --git a/arch/sparc/kernel/unaligned_32.c b/arch/sparc/kernel/unaligned_32.c
index 4491f4cb2695..7efbb2f9e77f 100644
--- a/arch/sparc/kernel/unaligned_32.c
+++ b/arch/sparc/kernel/unaligned_32.c
@@ -247,7 +247,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
         unsigned long addr = compute_effective_address(regs, insn);
         int err;
 
-        perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr);
+        perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
         switch (dir) {
         case load:
                 err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
@@ -338,7 +338,7 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
         }
 
         addr = compute_effective_address(regs, insn);
-        perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr);
+        perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
         switch(dir) {
         case load:
                 err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
index b2b019ea8caa..35cff1673aa4 100644
--- a/arch/sparc/kernel/unaligned_64.c
+++ b/arch/sparc/kernel/unaligned_64.c
@@ -317,7 +317,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
 
         addr = compute_effective_address(regs, insn,
                                          ((insn >> 25) & 0x1f));
-        perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr);
+        perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
         switch (asi) {
         case ASI_NL:
         case ASI_AIUPL:
@@ -384,7 +384,7 @@ int handle_popc(u32 insn, struct pt_regs *regs)
         int ret, i, rd = ((insn >> 25) & 0x1f);
         int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
 
-        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
+        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
         if (insn & 0x2000) {
                 maybe_flush_windows(0, 0, rd, from_kernel);
                 value = sign_extend_imm13(insn);
@@ -431,7 +431,7 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
         int asi = decode_asi(insn, regs);
         int flag = (freg < 32) ? FPRS_DL : FPRS_DU;
 
-        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
+        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
 
         save_and_clear_fpu();
         current_thread_info()->xfsr[0] &= ~0x1c000;
@@ -554,7 +554,7 @@ void handle_ld_nf(u32 insn, struct pt_regs *regs)
         int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
         unsigned long *reg;
 
-        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
+        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
 
         maybe_flush_windows(0, 0, rd, from_kernel);
         reg = fetch_reg_addr(rd, regs);
@@ -586,7 +586,7 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
 
         if (tstate & TSTATE_PRIV)
                 die_if_kernel("lddfmna from kernel", regs);
-        perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar);
+        perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar);
         if (test_thread_flag(TIF_32BIT))
                 pc = (u32)pc;
         if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
@@ -647,7 +647,7 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
 
         if (tstate & TSTATE_PRIV)
                 die_if_kernel("stdfmna from kernel", regs);
-        perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar);
+        perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar);
         if (test_thread_flag(TIF_32BIT))
                 pc = (u32)pc;
         if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
diff --git a/arch/sparc/kernel/visemul.c b/arch/sparc/kernel/visemul.c
index 36357717d691..32b626c9d815 100644
--- a/arch/sparc/kernel/visemul.c
+++ b/arch/sparc/kernel/visemul.c
@@ -802,7 +802,7 @@ int vis_emul(struct pt_regs *regs, unsigned int insn)
 
         BUG_ON(regs->tstate & TSTATE_PRIV);
 
-        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
+        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
 
         if (test_thread_flag(TIF_32BIT))
                 pc = (u32)pc;
diff --git a/arch/sparc/math-emu/math_32.c b/arch/sparc/math-emu/math_32.c
index a3fccde894ec..aa4d55b0bdf0 100644
--- a/arch/sparc/math-emu/math_32.c
+++ b/arch/sparc/math-emu/math_32.c
@@ -164,7 +164,7 @@ int do_mathemu(struct pt_regs *regs, struct task_struct *fpt)
         int retcode = 0;                               /* assume all succeed */
         unsigned long insn;
 
-        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
+        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
 
 #ifdef DEBUG_MATHEMU
         printk("In do_mathemu()... pc is %08lx\n", regs->pc);
diff --git a/arch/sparc/math-emu/math_64.c b/arch/sparc/math-emu/math_64.c
index 56d2c44747b8..e575bd2fe381 100644
--- a/arch/sparc/math-emu/math_64.c
+++ b/arch/sparc/math-emu/math_64.c
@@ -184,7 +184,7 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f)
 
         if (tstate & TSTATE_PRIV)
                 die_if_kernel("unfinished/unimplemented FPop from kernel", regs);
-        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
+        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
         if (test_thread_flag(TIF_32BIT))
                 pc = (u32)pc;
         if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 7543ddbdadb2..aa1c1b1ce5cc 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -251,7 +251,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
         if (in_atomic() || !mm)
                 goto no_context;
 
-        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
         down_read(&mm->mmap_sem);
 
@@ -301,12 +301,10 @@ good_area:
         }
         if (fault & VM_FAULT_MAJOR) {
                 current->maj_flt++;
-                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
-                              regs, address);
+                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
         } else {
                 current->min_flt++;
-                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
-                              regs, address);
+                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
         }
         up_read(&mm->mmap_sem);
         return;
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index f92ce56a8b22..504c0622f729 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -325,7 +325,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
         if (in_atomic() || !mm)
                 goto intr_or_no_mm;
 
-        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
         if (!down_read_trylock(&mm->mmap_sem)) {
                 if ((regs->tstate & TSTATE_PRIV) &&
@@ -433,12 +433,10 @@ good_area:
         }
         if (fault & VM_FAULT_MAJOR) {
                 current->maj_flt++;
-                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
-                              regs, address);
+                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
         } else {
                 current->min_flt++;
-                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
-                              regs, address);
+                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
         }
         up_read(&mm->mmap_sem);
 
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 8a57f9aa8e36..5b86ec51534c 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1339,7 +1339,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
                 if (!x86_perf_event_set_period(event))
                         continue;
 
-                if (perf_event_overflow(event, 1, &data, regs))
+                if (perf_event_overflow(event, &data, regs))
                         x86_pmu_stop(event, 0);
         }
 
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 41178c826c48..d38b0020f775 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1003,7 +1003,7 @@ again:
 
                 data.period = event->hw.last_period;
 
-                if (perf_event_overflow(event, 1, &data, regs))
+                if (perf_event_overflow(event, &data, regs))
                         x86_pmu_stop(event, 0);
         }
 
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index bab491b8ee25..0941f93f2940 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -340,7 +340,7 @@ static int intel_pmu_drain_bts_buffer(void)
          */
         perf_prepare_sample(&header, &data, event, &regs);
 
-        if (perf_output_begin(&handle, event, header.size * (top - at), 1, 1))
+        if (perf_output_begin(&handle, event, header.size * (top - at), 1))
                 return 1;
 
         for (; at < top; at++) {
@@ -616,7 +616,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
         else
                 regs.flags &= ~PERF_EFLAGS_EXACT;
 
-        if (perf_event_overflow(event, 1, &data, &regs))
+        if (perf_event_overflow(event, &data, &regs))
                 x86_pmu_stop(event, 0);
 }
 
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index f76fddf63381..d6e6a67b9608 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -970,7 +970,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
 
                 if (!x86_perf_event_set_period(event))
                         continue;
-                if (perf_event_overflow(event, 1, &data, regs))
+                if (perf_event_overflow(event, &data, regs))
                         x86_pmu_stop(event, 0);
         }
 
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 5f9ecff328b5..98da6a7b5e82 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -608,7 +608,7 @@ int kgdb_arch_init(void)
         return register_die_notifier(&kgdb_notifier);
 }
 
-static void kgdb_hw_overflow_handler(struct perf_event *event, int nmi,
+static void kgdb_hw_overflow_handler(struct perf_event *event,
                 struct perf_sample_data *data, struct pt_regs *regs)
 {
         struct task_struct *tsk = current;
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 807c2a2b80f1..11db2e9b860a 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -528,7 +528,7 @@ static int genregs_set(struct task_struct *target,
         return ret;
 }
 
-static void ptrace_triggered(struct perf_event *bp, int nmi,
+static void ptrace_triggered(struct perf_event *bp,
                              struct perf_sample_data *data,
                              struct pt_regs *regs)
 {
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 2dbf6bf4c7e5..4d09df054e39 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1059,7 +1059,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
         if (unlikely(error_code & PF_RSVD))
                 pgtable_bad(regs, error_code, address);
 
-        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
         /*
          * If we're in an interrupt, have no user context or are running
@@ -1161,11 +1161,11 @@ good_area:
         if (flags & FAULT_FLAG_ALLOW_RETRY) {
                 if (fault & VM_FAULT_MAJOR) {
                         tsk->maj_flt++;
-                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
                                       regs, address);
                 } else {
                         tsk->min_flt++;
-                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
                                       regs, address);
                 }
                 if (fault & VM_FAULT_RETRY) {
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 2f7b5d42ab41..0946a8bc098d 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -682,7 +682,7 @@ enum perf_event_active_state {
 struct file;
 struct perf_sample_data;
 
-typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
+typedef void (*perf_overflow_handler_t)(struct perf_event *,
                                         struct perf_sample_data *,
                                         struct pt_regs *regs);
 
@@ -925,7 +925,6 @@ struct perf_output_handle {
         unsigned long                   size;
         void                            *addr;
         int                             page;
-        int                             nmi;
         int                             sample;
 };
 
@@ -993,7 +992,7 @@ extern void perf_prepare_sample(struct perf_event_header *header,
                                 struct perf_event *event,
                                 struct pt_regs *regs);
 
-extern int perf_event_overflow(struct perf_event *event, int nmi,
+extern int perf_event_overflow(struct perf_event *event,
                                  struct perf_sample_data *data,
                                  struct pt_regs *regs);
 
@@ -1012,7 +1011,7 @@ static inline int is_software_event(struct perf_event *event)
 
 extern struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
-extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);
+extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
 
 #ifndef perf_arch_fetch_caller_regs
 static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
@@ -1034,7 +1033,7 @@ static inline void perf_fetch_caller_regs(struct pt_regs *regs)
 }
 
 static __always_inline void
-perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
+perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 {
         struct pt_regs hot_regs;
 
@@ -1043,7 +1042,7 @@ perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
                         perf_fetch_caller_regs(&hot_regs);
                         regs = &hot_regs;
                 }
-                __perf_sw_event(event_id, nr, nmi, regs, addr);
+                __perf_sw_event(event_id, nr, regs, addr);
         }
 }
 
@@ -1057,7 +1056,7 @@ static inline void perf_event_task_sched_in(struct task_struct *task)
 
 static inline void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
 {
-        perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
+        perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
 
         __perf_event_task_sched_out(task, next);
 }
@@ -1119,7 +1118,7 @@ extern void perf_bp_event(struct perf_event *event, void *data);
 
 extern int perf_output_begin(struct perf_output_handle *handle,
                              struct perf_event *event, unsigned int size,
-                             int nmi, int sample);
+                             int sample);
 extern void perf_output_end(struct perf_output_handle *handle);
 extern void perf_output_copy(struct perf_output_handle *handle,
                              const void *buf, unsigned int len);
@@ -1143,8 +1142,7 @@ static inline int perf_event_task_disable(void) { return -EINVAL; }
 static inline int perf_event_task_enable(void)                 { return -EINVAL; }
 
 static inline void
-perf_sw_event(u32 event_id, u64 nr, int nmi,
-              struct pt_regs *regs, u64 addr)                  { }
+perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)    { }
 static inline void
 perf_bp_event(struct perf_event *event, void *data)            { }
 
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 270e32f9fc06..dbd1ca75bd3c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3972,7 +3972,7 @@ void perf_prepare_sample(struct perf_event_header *header,
         }
 }
 
-static void perf_event_output(struct perf_event *event, int nmi,
+static void perf_event_output(struct perf_event *event,
                                 struct perf_sample_data *data,
                                 struct pt_regs *regs)
 {
@@ -3984,7 +3984,7 @@ static void perf_event_output(struct perf_event *event, int nmi,
 
         perf_prepare_sample(&header, data, event, regs);
 
-        if (perf_output_begin(&handle, event, header.size, nmi, 1))
+        if (perf_output_begin(&handle, event, header.size, 1))
                 goto exit;
 
         perf_output_sample(&handle, &header, data, event);
@@ -4024,7 +4024,7 @@ perf_event_read_event(struct perf_event *event,
         int ret;
 
         perf_event_header__init_id(&read_event.header, &sample, event);
-        ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0);
+        ret = perf_output_begin(&handle, event, read_event.header.size, 0);
         if (ret)
                 return;
 
@@ -4067,7 +4067,7 @@ static void perf_event_task_output(struct perf_event *event,
         perf_event_header__init_id(&task_event->event_id.header, &sample, event);
 
         ret = perf_output_begin(&handle, event,
-                                task_event->event_id.header.size, 0, 0);
+                                task_event->event_id.header.size, 0);
         if (ret)
                 goto out;
 
@@ -4204,7 +4204,7 @@ static void perf_event_comm_output(struct perf_event *event,
 
         perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
         ret = perf_output_begin(&handle, event,
-                                comm_event->event_id.header.size, 0, 0);
+                                comm_event->event_id.header.size, 0);
 
         if (ret)
                 goto out;
@@ -4351,7 +4351,7 @@ static void perf_event_mmap_output(struct perf_event *event,
 
         perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
         ret = perf_output_begin(&handle, event,
-                                mmap_event->event_id.header.size, 0, 0);
+                                mmap_event->event_id.header.size, 0);
         if (ret)
                 goto out;
 
@@ -4546,7 +4546,7 @@ static void perf_log_throttle(struct perf_event *event, int enable)
         perf_event_header__init_id(&throttle_event.header, &sample, event);
 
         ret = perf_output_begin(&handle, event,
-                                throttle_event.header.size, 1, 0);
+                                throttle_event.header.size, 0);
         if (ret)
                 return;
 
@@ -4559,7 +4559,7 @@ static void perf_log_throttle(struct perf_event *event, int enable)
  * Generic event overflow handling, sampling.
  */
 
-static int __perf_event_overflow(struct perf_event *event, int nmi,
+static int __perf_event_overflow(struct perf_event *event,
                                    int throttle, struct perf_sample_data *data,
                                    struct pt_regs *regs)
 {
@@ -4602,34 +4602,28 @@ static int __perf_event_overflow(struct perf_event *event, int nmi,
         if (events && atomic_dec_and_test(&event->event_limit)) {
                 ret = 1;
                 event->pending_kill = POLL_HUP;
-                if (nmi) {
-                        event->pending_disable = 1;
-                        irq_work_queue(&event->pending);
-                } else
-                        perf_event_disable(event);
+                event->pending_disable = 1;
+                irq_work_queue(&event->pending);
         }
 
         if (event->overflow_handler)
-                event->overflow_handler(event, nmi, data, regs);
+                event->overflow_handler(event, data, regs);
         else
-                perf_event_output(event, nmi, data, regs);
+                perf_event_output(event, data, regs);
 
         if (event->fasync && event->pending_kill) {
-                if (nmi) {
-                        event->pending_wakeup = 1;
-                        irq_work_queue(&event->pending);
-                } else
-                        perf_event_wakeup(event);
+                event->pending_wakeup = 1;
+                irq_work_queue(&event->pending);
         }
 
         return ret;
 }
 
-int perf_event_overflow(struct perf_event *event, int nmi,
+int perf_event_overflow(struct perf_event *event,
                           struct perf_sample_data *data,
                           struct pt_regs *regs)
 {
-        return __perf_event_overflow(event, nmi, 1, data, regs);
+        return __perf_event_overflow(event, 1, data, regs);
 }
 
 /*
@@ -4678,7 +4672,7 @@ again:
 }
 
 static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
-                                    int nmi, struct perf_sample_data *data,
+                                    struct perf_sample_data *data,
                                     struct pt_regs *regs)
 {
         struct hw_perf_event *hwc = &event->hw;
@@ -4692,7 +4686,7 @@ static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
                 return;
 
         for (; overflow; overflow--) {
-                if (__perf_event_overflow(event, nmi, throttle,
+                if (__perf_event_overflow(event, throttle,
                                             data, regs)) {
                         /*
                          * We inhibit the overflow from happening when
@@ -4705,7 +4699,7 @@ static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
 }
 
 static void perf_swevent_event(struct perf_event *event, u64 nr,
-                               int nmi, struct perf_sample_data *data,
+                               struct perf_sample_data *data,
                                struct pt_regs *regs)
 {
         struct hw_perf_event *hwc = &event->hw;
@@ -4719,12 +4713,12 @@ static void perf_swevent_event(struct perf_event *event, u64 nr,
                 return;
 
         if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
-                return perf_swevent_overflow(event, 1, nmi, data, regs);
+                return perf_swevent_overflow(event, 1, data, regs);
 
         if (local64_add_negative(nr, &hwc->period_left))
                 return;
 
-        perf_swevent_overflow(event, 0, nmi, data, regs);
+        perf_swevent_overflow(event, 0, data, regs);
 }
 
 static int perf_exclude_event(struct perf_event *event,
@@ -4812,7 +4806,7 @@ find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
 }
 
 static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
-                                    u64 nr, int nmi,
+                                    u64 nr,
                                     struct perf_sample_data *data,
                                     struct pt_regs *regs)
 {
@@ -4828,7 +4822,7 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
 
         hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
                 if (perf_swevent_match(event, type, event_id, data, regs))
-                        perf_swevent_event(event, nr, nmi, data, regs);
+                        perf_swevent_event(event, nr, data, regs);
         }
 end:
         rcu_read_unlock();
@@ -4849,8 +4843,7 @@ inline void perf_swevent_put_recursion_context(int rctx)
         put_recursion_context(swhash->recursion, rctx);
 }
 
-void __perf_sw_event(u32 event_id, u64 nr, int nmi,
-                            struct pt_regs *regs, u64 addr)
+void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 {
         struct perf_sample_data data;
         int rctx;
@@ -4862,7 +4855,7 @@ void __perf_sw_event(u32 event_id, u64 nr, int nmi,
 
         perf_sample_data_init(&data, addr);
 
-        do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);
+        do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
 
         perf_swevent_put_recursion_context(rctx);
         preempt_enable_notrace();
@@ -5110,7 +5103,7 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
 
         hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
                 if (perf_tp_event_match(event, &data, regs))
-                        perf_swevent_event(event, count, 1, &data, regs);
+                        perf_swevent_event(event, count, &data, regs);
         }
 
         perf_swevent_put_recursion_context(rctx);
@@ -5203,7 +5196,7 @@ void perf_bp_event(struct perf_event *bp, void *data)
         perf_sample_data_init(&sample, bp->attr.bp_addr);
 
         if (!bp->hw.state && !perf_exclude_event(bp, regs))
-                perf_swevent_event(bp, 1, 1, &sample, regs);
+                perf_swevent_event(bp, 1, &sample, regs);
 }
 #endif
 
@@ -5232,7 +5225,7 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
 
         if (regs && !perf_exclude_event(event, regs)) {
                 if (!(event->attr.exclude_idle && current->pid == 0))
-                        if (perf_event_overflow(event, 0, &data, regs))
+                        if (perf_event_overflow(event, &data, regs))
                                 ret = HRTIMER_NORESTART;
         }
 
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 114f27f3a624..09097dd8116c 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -27,7 +27,6 @@ struct ring_buffer {
         void                            *data_pages[0];
 };
 
-
 extern void rb_free(struct ring_buffer *rb);
 extern struct ring_buffer *
 rb_alloc(int nr_pages, long watermark, int cpu, int flags);
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index fc2701c99207..8b3b73630fa4 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -38,11 +38,8 @@ static void perf_output_wakeup(struct perf_output_handle *handle)
 {
         atomic_set(&handle->rb->poll, POLL_IN);
 
-        if (handle->nmi) {
-                handle->event->pending_wakeup = 1;
-                irq_work_queue(&handle->event->pending);
-        } else
-                perf_event_wakeup(handle->event);
+        handle->event->pending_wakeup = 1;
+        irq_work_queue(&handle->event->pending);
 }
 
 /*
@@ -102,7 +99,7 @@ out:
 
 int perf_output_begin(struct perf_output_handle *handle,
                       struct perf_event *event, unsigned int size,
-                      int nmi, int sample)
+                      int sample)
 {
         struct ring_buffer *rb;
         unsigned long tail, offset, head;
@@ -127,7 +124,6 @@ int perf_output_begin(struct perf_output_handle *handle,
 
         handle->rb      = rb;
         handle->event   = event;
-        handle->nmi     = nmi;
         handle->sample  = sample;
 
         if (!rb->nr_pages)
diff --git a/kernel/sched.c b/kernel/sched.c
index 3f2e502d609b..d08d110b8976 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2220,7 +2220,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 
         if (task_cpu(p) != new_cpu) {
                 p->se.nr_migrations++;
-                perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);
+                perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
         }
 
         __set_task_cpu(p, new_cpu);
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 752b75ba662b..a6708e677a0a 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -211,7 +211,7 @@ static struct perf_event_attr wd_hw_attr = {
 };
 
 /* Callback function for perf event subsystem */
-static void watchdog_overflow_callback(struct perf_event *event, int nmi,
+static void watchdog_overflow_callback(struct perf_event *event,
                        struct perf_sample_data *data,
                        struct pt_regs *regs)
 {
diff --git a/samples/hw_breakpoint/data_breakpoint.c b/samples/hw_breakpoint/data_breakpoint.c
index 063653955f9f..7b164d3200ff 100644
--- a/samples/hw_breakpoint/data_breakpoint.c
+++ b/samples/hw_breakpoint/data_breakpoint.c
@@ -41,7 +41,7 @@ module_param_string(ksym, ksym_name, KSYM_NAME_LEN, S_IRUGO);
 MODULE_PARM_DESC(ksym, "Kernel symbol to monitor; this module will report any"
                         " write operations on the kernel symbol");
 
-static void sample_hbp_handler(struct perf_event *bp, int nmi,
+static void sample_hbp_handler(struct perf_event *bp,
                                struct perf_sample_data *data,
                                struct pt_regs *regs)
 {