author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2010-06-14 02:49:00 -0400
committer	Ingo Molnar <mingo@elte.hu>			2010-09-09 14:46:29 -0400
commit		33696fc0d141bbbcb12f75b69608ea83282e3117 (patch)
tree		72e08dba377d57eb7dd8c08a937a6de10e8af9c4
parent		24cd7f54a0d47e1d5b3de29e2456bfbd2d8447b7 (diff)
perf: Per PMU disable
Changes perf_disable() into perf_pmu_disable().

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Miller <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
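In short: each PMU now carries its own per-CPU disable nesting count (allocated in perf_pmu_register(), freed in perf_pmu_unregister()) and supplies pmu_enable()/pmu_disable() callbacks in place of the global hw_perf_enable()/hw_perf_disable() weak hooks, so disabling one PMU no longer stops every other PMU on the CPU. A minimal sketch of the resulting driver pattern (the example_* names are illustrative only, not part of this patch):

	static void example_pmu_pmu_disable(struct pmu *pmu)
	{
		/* stop this PMU's counters on the local CPU */
	}

	static void example_pmu_pmu_enable(struct pmu *pmu)
	{
		/* re-arm this PMU's counters on the local CPU */
	}

	static struct pmu example_pmu = {
		.pmu_enable	= example_pmu_pmu_enable,
		.pmu_disable	= example_pmu_pmu_disable,
		/* .event_init, .enable, .disable, .read, ... as before */
	};

	static void example_reprogram(struct perf_event *event)
	{
		/* the calls nest; only the outermost pair reaches the callbacks */
		perf_pmu_disable(event->pmu);
		/* ... add/remove events, rewrite counter control registers ... */
		perf_pmu_enable(event->pmu);
	}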
-rw-r--r--	arch/alpha/kernel/perf_event.c			30
-rw-r--r--	arch/arm/kernel/perf_event.c			28
-rw-r--r--	arch/powerpc/kernel/perf_event.c		24
-rw-r--r--	arch/powerpc/kernel/perf_event_fsl_emb.c	18
-rw-r--r--	arch/sh/kernel/perf_event.c			38
-rw-r--r--	arch/sparc/kernel/perf_event.c			20
-rw-r--r--	arch/x86/kernel/cpu/perf_event.c		16
-rw-r--r--	include/linux/perf_event.h			13
-rw-r--r--	kernel/perf_event.c				31
9 files changed, 119 insertions(+), 99 deletions(-)
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index 19660b5c298f..3e260731f8e6 100644
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -435,7 +435,7 @@ static int alpha_pmu_enable(struct perf_event *event)
 	 * nevertheless we disable the PMCs first to enable a potential
 	 * final PMI to occur before we disable interrupts.
 	 */
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 	local_irq_save(flags);
 
 	/* Default to error to be returned */
@@ -456,7 +456,7 @@ static int alpha_pmu_enable(struct perf_event *event)
 	}
 
 	local_irq_restore(flags);
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 
 	return ret;
 }
@@ -474,7 +474,7 @@ static void alpha_pmu_disable(struct perf_event *event)
 	unsigned long flags;
 	int j;
 
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 	local_irq_save(flags);
 
 	for (j = 0; j < cpuc->n_events; j++) {
@@ -502,7 +502,7 @@ static void alpha_pmu_disable(struct perf_event *event)
 	}
 
 	local_irq_restore(flags);
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 }
 
 
@@ -668,18 +668,10 @@ static int alpha_pmu_event_init(struct perf_event *event)
 	return err;
 }
 
-static struct pmu pmu = {
-	.event_init	= alpha_pmu_event_init,
-	.enable		= alpha_pmu_enable,
-	.disable	= alpha_pmu_disable,
-	.read		= alpha_pmu_read,
-	.unthrottle	= alpha_pmu_unthrottle,
-};
-
 /*
  * Main entry point - enable HW performance counters.
  */
-void hw_perf_enable(void)
+static void alpha_pmu_pmu_enable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -705,7 +697,7 @@ void hw_perf_enable(void)
  * Main entry point - disable HW performance counters.
  */
 
-void hw_perf_disable(void)
+static void alpha_pmu_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -718,6 +710,16 @@ void hw_perf_disable(void)
 	wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
 }
 
+static struct pmu pmu = {
+	.pmu_enable	= alpha_pmu_pmu_enable,
+	.pmu_disable	= alpha_pmu_pmu_disable,
+	.event_init	= alpha_pmu_event_init,
+	.enable		= alpha_pmu_enable,
+	.disable	= alpha_pmu_disable,
+	.read		= alpha_pmu_read,
+	.unthrottle	= alpha_pmu_unthrottle,
+};
+
 
 /*
  * Main entry point - don't know when this is called but it
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index afc92c580d18..3343f3f4b973 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -277,7 +277,7 @@ armpmu_enable(struct perf_event *event)
 	int idx;
 	int err = 0;
 
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 
 	/* If we don't have a space for the counter then finish early. */
 	idx = armpmu->get_event_idx(cpuc, hwc);
@@ -305,7 +305,7 @@ armpmu_enable(struct perf_event *event)
 	perf_event_update_userpage(event);
 
 out:
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	return err;
 }
 
@@ -534,16 +534,7 @@ static int armpmu_event_init(struct perf_event *event)
 	return err;
 }
 
-static struct pmu pmu = {
-	.event_init = armpmu_event_init,
-	.enable	    = armpmu_enable,
-	.disable    = armpmu_disable,
-	.unthrottle = armpmu_unthrottle,
-	.read	    = armpmu_read,
-};
-
-void
-hw_perf_enable(void)
+static void armpmu_pmu_enable(struct pmu *pmu)
 {
 	/* Enable all of the perf events on hardware. */
 	int idx;
@@ -564,13 +555,22 @@ hw_perf_enable(void)
 	armpmu->start();
 }
 
-void
-hw_perf_disable(void)
+static void armpmu_pmu_disable(struct pmu *pmu)
 {
 	if (armpmu)
 		armpmu->stop();
 }
 
+static struct pmu pmu = {
+	.pmu_enable	= armpmu_pmu_enable,
+	.pmu_disable	= armpmu_pmu_disable,
+	.event_init	= armpmu_event_init,
+	.enable		= armpmu_enable,
+	.disable	= armpmu_disable,
+	.unthrottle	= armpmu_unthrottle,
+	.read		= armpmu_read,
+};
+
 /*
  * ARMv6 Performance counter handling code.
  *
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index c1408821dbc2..deb84bbcb0e6 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -517,7 +517,7 @@ static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
  * Disable all events to prevent PMU interrupts and to allow
  * events to be added or removed.
  */
-void hw_perf_disable(void)
+static void power_pmu_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw;
 	unsigned long flags;
@@ -565,7 +565,7 @@ void hw_perf_disable(void)
  * If we were previously disabled and events were added, then
  * put the new config on the PMU.
  */
-void hw_perf_enable(void)
+static void power_pmu_pmu_enable(struct pmu *pmu)
 {
 	struct perf_event *event;
 	struct cpu_hw_events *cpuhw;
@@ -735,7 +735,7 @@ static int power_pmu_enable(struct perf_event *event)
 	int ret = -EAGAIN;
 
 	local_irq_save(flags);
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 
 	/*
 	 * Add the event to the list (if there is room)
@@ -769,7 +769,7 @@ nocheck:
 
 	ret = 0;
  out:
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 	return ret;
 }
@@ -784,7 +784,7 @@ static void power_pmu_disable(struct perf_event *event)
 	unsigned long flags;
 
 	local_irq_save(flags);
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 
 	power_pmu_read(event);
 
@@ -821,7 +821,7 @@ static void power_pmu_disable(struct perf_event *event)
 		cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
 	}
 
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 }
 
@@ -837,7 +837,7 @@ static void power_pmu_unthrottle(struct perf_event *event)
 	if (!event->hw.idx || !event->hw.sample_period)
 		return;
 	local_irq_save(flags);
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 	power_pmu_read(event);
 	left = event->hw.sample_period;
 	event->hw.last_period = left;
@@ -848,7 +848,7 @@ static void power_pmu_unthrottle(struct perf_event *event)
 	local64_set(&event->hw.prev_count, val);
 	local64_set(&event->hw.period_left, left);
 	perf_event_update_userpage(event);
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 }
 
@@ -861,7 +861,7 @@ void power_pmu_start_txn(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
-	perf_disable();
+	perf_pmu_disable(pmu);
 	cpuhw->group_flag |= PERF_EVENT_TXN;
 	cpuhw->n_txn_start = cpuhw->n_events;
 }
@@ -876,7 +876,7 @@ void power_pmu_cancel_txn(struct pmu *pmu)
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
 	cpuhw->group_flag &= ~PERF_EVENT_TXN;
-	perf_enable();
+	perf_pmu_enable(pmu);
 }
 
 /*
@@ -903,7 +903,7 @@ int power_pmu_commit_txn(struct pmu *pmu)
 		cpuhw->event[i]->hw.config = cpuhw->events[i];
 
 	cpuhw->group_flag &= ~PERF_EVENT_TXN;
-	perf_enable();
+	perf_pmu_enable(pmu);
 	return 0;
 }
 
@@ -1131,6 +1131,8 @@ static int power_pmu_event_init(struct perf_event *event)
 }
 
 struct pmu power_pmu = {
+	.pmu_enable	= power_pmu_pmu_enable,
+	.pmu_disable	= power_pmu_pmu_disable,
 	.event_init	= power_pmu_event_init,
 	.enable		= power_pmu_enable,
 	.disable	= power_pmu_disable,
diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c
index 9bc84a7fd901..84b1974c628f 100644
--- a/arch/powerpc/kernel/perf_event_fsl_emb.c
+++ b/arch/powerpc/kernel/perf_event_fsl_emb.c
@@ -177,7 +177,7 @@ static void fsl_emb_pmu_read(struct perf_event *event)
  * Disable all events to prevent PMU interrupts and to allow
  * events to be added or removed.
  */
-void hw_perf_disable(void)
+static void fsl_emb_pmu_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw;
 	unsigned long flags;
@@ -216,7 +216,7 @@ void hw_perf_disable(void)
  * If we were previously disabled and events were added, then
  * put the new config on the PMU.
  */
-void hw_perf_enable(void)
+static void fsl_emb_pmu_pmu_enable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw;
 	unsigned long flags;
@@ -271,7 +271,7 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
 	u64 val;
 	int i;
 
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 	cpuhw = &get_cpu_var(cpu_hw_events);
 
 	if (event->hw.config & FSL_EMB_EVENT_RESTRICTED)
@@ -311,7 +311,7 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
 	ret = 0;
  out:
 	put_cpu_var(cpu_hw_events);
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	return ret;
 }
 
@@ -321,7 +321,7 @@ static void fsl_emb_pmu_disable(struct perf_event *event)
 	struct cpu_hw_events *cpuhw;
 	int i = event->hw.idx;
 
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 	if (i < 0)
 		goto out;
 
@@ -349,7 +349,7 @@ static void fsl_emb_pmu_disable(struct perf_event *event)
 	cpuhw->n_events--;
 
  out:
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	put_cpu_var(cpu_hw_events);
 }
 
@@ -367,7 +367,7 @@ static void fsl_emb_pmu_unthrottle(struct perf_event *event)
 	if (event->hw.idx < 0 || !event->hw.sample_period)
 		return;
 	local_irq_save(flags);
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 	fsl_emb_pmu_read(event);
 	left = event->hw.sample_period;
 	event->hw.last_period = left;
@@ -378,7 +378,7 @@ static void fsl_emb_pmu_unthrottle(struct perf_event *event)
 	local64_set(&event->hw.prev_count, val);
 	local64_set(&event->hw.period_left, left);
 	perf_event_update_userpage(event);
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 }
 
@@ -524,6 +524,8 @@ static int fsl_emb_pmu_event_init(struct perf_event *event)
 }
 
 static struct pmu fsl_emb_pmu = {
+	.pmu_enable	= fsl_emb_pmu_pmu_enable,
+	.pmu_disable	= fsl_emb_pmu_pmu_disable,
 	.event_init	= fsl_emb_pmu_event_init,
 	.enable		= fsl_emb_pmu_enable,
 	.disable	= fsl_emb_pmu_disable,
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c
index d042989ceb45..4bbe19058a58 100644
--- a/arch/sh/kernel/perf_event.c
+++ b/arch/sh/kernel/perf_event.c
@@ -232,7 +232,7 @@ static int sh_pmu_enable(struct perf_event *event)
 	int idx = hwc->idx;
 	int ret = -EAGAIN;
 
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 
 	if (test_and_set_bit(idx, cpuc->used_mask)) {
 		idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events);
@@ -253,7 +253,7 @@ static int sh_pmu_enable(struct perf_event *event)
 	perf_event_update_userpage(event);
 	ret = 0;
 out:
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	return ret;
 }
 
@@ -285,7 +285,25 @@ static int sh_pmu_event_init(struct perf_event *event)
 	return err;
 }
 
+static void sh_pmu_pmu_enable(struct pmu *pmu)
+{
+	if (!sh_pmu_initialized())
+		return;
+
+	sh_pmu->enable_all();
+}
+
+static void sh_pmu_pmu_disable(struct pmu *pmu)
+{
+	if (!sh_pmu_initialized())
+		return;
+
+	sh_pmu->disable_all();
+}
+
 static struct pmu pmu = {
+	.pmu_enable	= sh_pmu_pmu_enable,
+	.pmu_disable	= sh_pmu_pmu_disable,
 	.event_init	= sh_pmu_event_init,
 	.enable		= sh_pmu_enable,
 	.disable	= sh_pmu_disable,
@@ -316,22 +334,6 @@ sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 	return NOTIFY_OK;
 }
 
-void hw_perf_enable(void)
-{
-	if (!sh_pmu_initialized())
-		return;
-
-	sh_pmu->enable_all();
-}
-
-void hw_perf_disable(void)
-{
-	if (!sh_pmu_initialized())
-		return;
-
-	sh_pmu->disable_all();
-}
-
 int __cpuinit register_sh_pmu(struct sh_pmu *pmu)
 {
 	if (sh_pmu)
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index d0131deeeaf6..37cae676536c 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -664,7 +664,7 @@ out:
 	return pcr;
 }
 
-void hw_perf_enable(void)
+static void sparc_pmu_pmu_enable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	u64 pcr;
@@ -691,7 +691,7 @@ void hw_perf_enable(void)
 	pcr_ops->write(cpuc->pcr);
 }
 
-void hw_perf_disable(void)
+static void sparc_pmu_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	u64 val;
@@ -718,7 +718,7 @@ static void sparc_pmu_disable(struct perf_event *event)
 	int i;
 
 	local_irq_save(flags);
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 
 	for (i = 0; i < cpuc->n_events; i++) {
 		if (event == cpuc->event[i]) {
@@ -748,7 +748,7 @@ static void sparc_pmu_disable(struct perf_event *event)
 		}
 	}
 
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 }
 
@@ -991,7 +991,7 @@ static int sparc_pmu_enable(struct perf_event *event)
 	unsigned long flags;
 
 	local_irq_save(flags);
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 
 	n0 = cpuc->n_events;
 	if (n0 >= perf_max_events)
@@ -1020,7 +1020,7 @@ nocheck:
 
 	ret = 0;
 out:
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 	return ret;
 }
@@ -1113,7 +1113,7 @@ static void sparc_pmu_start_txn(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
-	perf_disable();
+	perf_pmu_disable(pmu);
 	cpuhw->group_flag |= PERF_EVENT_TXN;
 }
 
@@ -1127,7 +1127,7 @@ static void sparc_pmu_cancel_txn(struct pmu *pmu)
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
 	cpuhw->group_flag &= ~PERF_EVENT_TXN;
-	perf_enable();
+	perf_pmu_enable(pmu);
 }
 
 /*
@@ -1151,11 +1151,13 @@ static int sparc_pmu_commit_txn(struct pmu *pmu)
 		return -EAGAIN;
 
 	cpuc->group_flag &= ~PERF_EVENT_TXN;
-	perf_enable();
+	perf_pmu_enable(pmu);
 	return 0;
 }
 
 static struct pmu pmu = {
+	.pmu_enable	= sparc_pmu_pmu_enable,
+	.pmu_disable	= sparc_pmu_pmu_disable,
 	.event_init	= sparc_pmu_event_init,
 	.enable		= sparc_pmu_enable,
 	.disable	= sparc_pmu_disable,
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 846070ce49c3..79705ac45019 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -583,7 +583,7 @@ static void x86_pmu_disable_all(void)
 	}
 }
 
-void hw_perf_disable(void)
+static void x86_pmu_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -803,7 +803,7 @@ static inline int match_prev_assignment(struct hw_perf_event *hwc,
 static int x86_pmu_start(struct perf_event *event);
 static void x86_pmu_stop(struct perf_event *event);
 
-void hw_perf_enable(void)
+static void x86_pmu_pmu_enable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct perf_event *event;
@@ -969,7 +969,7 @@ static int x86_pmu_enable(struct perf_event *event)
 
 	hwc = &event->hw;
 
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 	n0 = cpuc->n_events;
 	ret = n = collect_events(cpuc, event, false);
 	if (ret < 0)
@@ -999,7 +999,7 @@ done_collect:
 
 	ret = 0;
 out:
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	return ret;
 }
 
@@ -1436,7 +1436,7 @@ static void x86_pmu_start_txn(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
-	perf_disable();
+	perf_pmu_disable(pmu);
 	cpuc->group_flag |= PERF_EVENT_TXN;
 	cpuc->n_txn = 0;
 }
@@ -1456,7 +1456,7 @@ static void x86_pmu_cancel_txn(struct pmu *pmu)
 	 */
 	cpuc->n_added -= cpuc->n_txn;
 	cpuc->n_events -= cpuc->n_txn;
-	perf_enable();
+	perf_pmu_enable(pmu);
 }
 
 /*
@@ -1486,7 +1486,7 @@ static int x86_pmu_commit_txn(struct pmu *pmu)
 	memcpy(cpuc->assign, assign, n*sizeof(int));
 
 	cpuc->group_flag &= ~PERF_EVENT_TXN;
-	perf_enable();
+	perf_pmu_enable(pmu);
 	return 0;
 }
 
@@ -1605,6 +1605,8 @@ int x86_pmu_event_init(struct perf_event *event)
 }
 
 static struct pmu pmu = {
+	.pmu_enable	= x86_pmu_pmu_enable,
+	.pmu_disable	= x86_pmu_pmu_disable,
 	.event_init	= x86_pmu_event_init,
 	.enable		= x86_pmu_enable,
 	.disable	= x86_pmu_disable,
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 243286a8ded7..6abf103fb7f8 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -563,6 +563,11 @@ struct perf_event;
 struct pmu {
 	struct list_head	entry;
 
+	int			*pmu_disable_count;
+
+	void (*pmu_enable)	(struct pmu *pmu);
+	void (*pmu_disable)	(struct pmu *pmu);
+
 	/*
 	 * Should return -ENOENT when the @event doesn't match this PMU.
 	 */
@@ -868,10 +873,8 @@ extern void perf_event_free_task(struct task_struct *task);
 extern void set_perf_event_pending(void);
 extern void perf_event_do_pending(void);
 extern void perf_event_print_debug(void);
-extern void __perf_disable(void);
-extern bool __perf_enable(void);
-extern void perf_disable(void);
-extern void perf_enable(void);
+extern void perf_pmu_disable(struct pmu *pmu);
+extern void perf_pmu_enable(struct pmu *pmu);
 extern int perf_event_task_disable(void);
 extern int perf_event_task_enable(void);
 extern void perf_event_update_userpage(struct perf_event *event);
@@ -1056,8 +1059,6 @@ static inline void perf_event_exit_task(struct task_struct *child) { }
 static inline void perf_event_free_task(struct task_struct *task)	{ }
 static inline void perf_event_do_pending(void)				{ }
 static inline void perf_event_print_debug(void)				{ }
-static inline void perf_disable(void)					{ }
-static inline void perf_enable(void)					{ }
 static inline int perf_event_task_disable(void)				{ return -EINVAL; }
 static inline int perf_event_task_enable(void)				{ return -EINVAL; }
 
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 9a98ce953561..5ed0c06765bb 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -71,23 +71,20 @@ static atomic64_t perf_event_id;
  */
 static DEFINE_SPINLOCK(perf_resource_lock);
 
-void __weak hw_perf_disable(void)		{ barrier(); }
-void __weak hw_perf_enable(void)		{ barrier(); }
-
 void __weak perf_event_print_debug(void)	{ }
 
-static DEFINE_PER_CPU(int, perf_disable_count);
-
-void perf_disable(void)
+void perf_pmu_disable(struct pmu *pmu)
 {
-	if (!__get_cpu_var(perf_disable_count)++)
-		hw_perf_disable();
+	int *count = this_cpu_ptr(pmu->pmu_disable_count);
+	if (!(*count)++)
+		pmu->pmu_disable(pmu);
 }
 
-void perf_enable(void)
+void perf_pmu_enable(struct pmu *pmu)
 {
-	if (!--__get_cpu_var(perf_disable_count))
-		hw_perf_enable();
+	int *count = this_cpu_ptr(pmu->pmu_disable_count);
+	if (!--(*count))
+		pmu->pmu_enable(pmu);
 }
 
 static void get_ctx(struct perf_event_context *ctx)
@@ -4970,11 +4967,19 @@ static struct srcu_struct pmus_srcu;
 
 int perf_pmu_register(struct pmu *pmu)
 {
+	int ret;
+
 	mutex_lock(&pmus_lock);
+	ret = -ENOMEM;
+	pmu->pmu_disable_count = alloc_percpu(int);
+	if (!pmu->pmu_disable_count)
+		goto unlock;
 	list_add_rcu(&pmu->entry, &pmus);
+	ret = 0;
+unlock:
 	mutex_unlock(&pmus_lock);
 
-	return 0;
+	return ret;
 }
 
 void perf_pmu_unregister(struct pmu *pmu)
@@ -4984,6 +4989,8 @@ void perf_pmu_unregister(struct pmu *pmu)
 	mutex_unlock(&pmus_lock);
 
 	synchronize_srcu(&pmus_srcu);
+
+	free_percpu(pmu->pmu_disable_count);
 }
 
 struct pmu *perf_init_event(struct perf_event *event)