Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/kernel/perf_event.c           | 30
-rw-r--r--  arch/arm/kernel/perf_event.c             | 28
-rw-r--r--  arch/powerpc/kernel/perf_event.c         | 24
-rw-r--r--  arch/powerpc/kernel/perf_event_fsl_emb.c | 18
-rw-r--r--  arch/sh/kernel/perf_event.c              | 38
-rw-r--r--  arch/sparc/kernel/perf_event.c           | 20
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c         | 16
7 files changed, 93 insertions(+), 81 deletions(-)
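The theme across all seven files: the single global hw_perf_enable()/hw_perf_disable() pair becomes per-PMU pmu_enable()/pmu_disable() callbacks in struct pmu, and arch code stops calling perf_disable()/perf_enable() in favour of perf_pmu_disable(event->pmu)/perf_pmu_enable(event->pmu), so only the PMU that owns the event is quiesced. As a rough sketch of the generic wrappers the arch callbacks plug into (the real definitions live in kernel/perf_event.c and are not part of this diff; the pmu_disable_count nesting counter is an assumption here):

    /* Sketch only: nesting wrappers assumed to back perf_pmu_disable/enable.
     * The pmu_disable_count field name is an assumption, not from this diff. */
    void perf_pmu_disable(struct pmu *pmu)
    {
            if (!pmu->pmu_disable_count++)  /* first disable really stops hw */
                    pmu->pmu_disable(pmu);
    }

    void perf_pmu_enable(struct pmu *pmu)
    {
            if (!--pmu->pmu_disable_count)  /* last enable restarts hw */
                    pmu->pmu_enable(pmu);
    }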
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index 19660b5c298f..3e260731f8e6 100644
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -435,7 +435,7 @@ static int alpha_pmu_enable(struct perf_event *event)
 	 * nevertheless we disable the PMCs first to enable a potential
 	 * final PMI to occur before we disable interrupts.
 	 */
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 	local_irq_save(flags);
 
 	/* Default to error to be returned */
@@ -456,7 +456,7 @@ static int alpha_pmu_enable(struct perf_event *event)
 	}
 
 	local_irq_restore(flags);
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 
 	return ret;
 }
@@ -474,7 +474,7 @@ static void alpha_pmu_disable(struct perf_event *event)
 	unsigned long flags;
 	int j;
 
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 	local_irq_save(flags);
 
 	for (j = 0; j < cpuc->n_events; j++) {
@@ -502,7 +502,7 @@ static void alpha_pmu_disable(struct perf_event *event)
 	}
 
 	local_irq_restore(flags);
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 }
 
 
@@ -668,18 +668,10 @@ static int alpha_pmu_event_init(struct perf_event *event)
 	return err;
 }
 
-static struct pmu pmu = {
-	.event_init	= alpha_pmu_event_init,
-	.enable		= alpha_pmu_enable,
-	.disable	= alpha_pmu_disable,
-	.read		= alpha_pmu_read,
-	.unthrottle	= alpha_pmu_unthrottle,
-};
-
 /*
  * Main entry point - enable HW performance counters.
  */
-void hw_perf_enable(void)
+static void alpha_pmu_pmu_enable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -705,7 +697,7 @@ void hw_perf_enable(void)
  * Main entry point - disable HW performance counters.
  */
 
-void hw_perf_disable(void)
+static void alpha_pmu_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -718,6 +710,16 @@ void hw_perf_disable(void)
 	wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
 }
 
+static struct pmu pmu = {
+	.pmu_enable	= alpha_pmu_pmu_enable,
+	.pmu_disable	= alpha_pmu_pmu_disable,
+	.event_init	= alpha_pmu_event_init,
+	.enable		= alpha_pmu_enable,
+	.disable	= alpha_pmu_disable,
+	.read		= alpha_pmu_read,
+	.unthrottle	= alpha_pmu_unthrottle,
+};
+
 
 /*
  * Main entry point - don't know when this is called but it
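The alpha conversion shows the pattern every arch below repeats: the old globals are renamed with the PMU's prefix and made static, and per-event paths bracket their critical section through the event's own PMU. A minimal sketch of the converted shape, where do_the_scheduling() is a hypothetical stand-in for the arch-specific counter setup:

    /* Minimal sketch of the converted per-event path; do_the_scheduling()
     * is a hypothetical stand-in, not a function from this diff. */
    static int some_pmu_enable(struct perf_event *event)
    {
            unsigned long flags;
            int ret;

            perf_pmu_disable(event->pmu);   /* was: perf_disable() */
            local_irq_save(flags);
            ret = do_the_scheduling(event);
            local_irq_restore(flags);
            perf_pmu_enable(event->pmu);    /* was: perf_enable() */
            return ret;
    }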
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index afc92c580d18..3343f3f4b973 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -277,7 +277,7 @@ armpmu_enable(struct perf_event *event)
 	int idx;
 	int err = 0;
 
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 
 	/* If we don't have a space for the counter then finish early. */
 	idx = armpmu->get_event_idx(cpuc, hwc);
@@ -305,7 +305,7 @@ armpmu_enable(struct perf_event *event)
 	perf_event_update_userpage(event);
 
 out:
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	return err;
 }
 
@@ -534,16 +534,7 @@ static int armpmu_event_init(struct perf_event *event)
 	return err;
 }
 
-static struct pmu pmu = {
-	.event_init	= armpmu_event_init,
-	.enable		= armpmu_enable,
-	.disable	= armpmu_disable,
-	.unthrottle	= armpmu_unthrottle,
-	.read		= armpmu_read,
-};
-
-void
-hw_perf_enable(void)
+static void armpmu_pmu_enable(struct pmu *pmu)
 {
 	/* Enable all of the perf events on hardware. */
 	int idx;
@@ -564,13 +555,22 @@ hw_perf_enable(void)
 	armpmu->start();
 }
 
-void
-hw_perf_disable(void)
+static void armpmu_pmu_disable(struct pmu *pmu)
 {
 	if (armpmu)
 		armpmu->stop();
 }
 
+static struct pmu pmu = {
+	.pmu_enable	= armpmu_pmu_enable,
+	.pmu_disable	= armpmu_pmu_disable,
+	.event_init	= armpmu_event_init,
+	.enable		= armpmu_enable,
+	.disable	= armpmu_disable,
+	.unthrottle	= armpmu_unthrottle,
+	.read		= armpmu_read,
+};
+
 /*
  * ARMv6 Performance counter handling code.
  *
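With arm converted, the struct pmu each arch fills in now carries the whole-PMU callbacks alongside the per-event ones. An abridged view, limited to the fields this diff actually touches (the real structure in include/linux/perf_event.h has more members, including the transaction hooks used by powerpc, sparc and x86 below):

    /* Abridged view of struct pmu, showing only fields used in this diff. */
    struct pmu {
            void	(*pmu_enable)	(struct pmu *pmu);	/* new: whole-PMU on  */
            void	(*pmu_disable)	(struct pmu *pmu);	/* new: whole-PMU off */
            int	(*event_init)	(struct perf_event *event);
            int	(*enable)	(struct perf_event *event);
            void	(*disable)	(struct perf_event *event);
            void	(*read)		(struct perf_event *event);
            void	(*unthrottle)	(struct perf_event *event);
    };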
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index c1408821dbc2..deb84bbcb0e6 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -517,7 +517,7 @@ static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
  * Disable all events to prevent PMU interrupts and to allow
  * events to be added or removed.
  */
-void hw_perf_disable(void)
+static void power_pmu_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw;
 	unsigned long flags;
@@ -565,7 +565,7 @@ void hw_perf_disable(void)
  * If we were previously disabled and events were added, then
  * put the new config on the PMU.
  */
-void hw_perf_enable(void)
+static void power_pmu_pmu_enable(struct pmu *pmu)
 {
 	struct perf_event *event;
 	struct cpu_hw_events *cpuhw;
@@ -735,7 +735,7 @@ static int power_pmu_enable(struct perf_event *event)
 	int ret = -EAGAIN;
 
 	local_irq_save(flags);
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 
 	/*
 	 * Add the event to the list (if there is room)
@@ -769,7 +769,7 @@ nocheck:
 
 	ret = 0;
 out:
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 	return ret;
 }
@@ -784,7 +784,7 @@ static void power_pmu_disable(struct perf_event *event)
 	unsigned long flags;
 
 	local_irq_save(flags);
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 
 	power_pmu_read(event);
 
@@ -821,7 +821,7 @@ static void power_pmu_disable(struct perf_event *event)
 		cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
 	}
 
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 }
 
@@ -837,7 +837,7 @@ static void power_pmu_unthrottle(struct perf_event *event)
 	if (!event->hw.idx || !event->hw.sample_period)
 		return;
 	local_irq_save(flags);
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 	power_pmu_read(event);
 	left = event->hw.sample_period;
 	event->hw.last_period = left;
@@ -848,7 +848,7 @@ static void power_pmu_unthrottle(struct perf_event *event)
 	local64_set(&event->hw.prev_count, val);
 	local64_set(&event->hw.period_left, left);
 	perf_event_update_userpage(event);
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 }
 
@@ -861,7 +861,7 @@ void power_pmu_start_txn(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
-	perf_disable();
+	perf_pmu_disable(pmu);
 	cpuhw->group_flag |= PERF_EVENT_TXN;
 	cpuhw->n_txn_start = cpuhw->n_events;
 }
@@ -876,7 +876,7 @@ void power_pmu_cancel_txn(struct pmu *pmu)
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
 	cpuhw->group_flag &= ~PERF_EVENT_TXN;
-	perf_enable();
+	perf_pmu_enable(pmu);
 }
 
 /*
@@ -903,7 +903,7 @@ int power_pmu_commit_txn(struct pmu *pmu)
 		cpuhw->event[i]->hw.config = cpuhw->events[i];
 
 	cpuhw->group_flag &= ~PERF_EVENT_TXN;
-	perf_enable();
+	perf_pmu_enable(pmu);
 	return 0;
 }
 
@@ -1131,6 +1131,8 @@ static int power_pmu_event_init(struct perf_event *event)
 }
 
 struct pmu power_pmu = {
+	.pmu_enable	= power_pmu_pmu_enable,
+	.pmu_disable	= power_pmu_pmu_disable,
 	.event_init	= power_pmu_event_init,
 	.enable		= power_pmu_enable,
 	.disable	= power_pmu_disable,
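The powerpc hunks also convert the transaction API: start_txn() now disables only its own PMU, and both commit_txn() (on success) and cancel_txn() (on failure) re-enable it, so a whole event group is scheduled with the hardware quiesced. An illustrative caller, where schedule_group() is a stand-in rather than the core scheduler's actual code:

    /* Illustrative only: schedule_group() is a stand-in, not core code. */
    static int schedule_group(struct pmu *pmu, struct perf_event *leader)
    {
            struct perf_event *sibling;
            int err;

            pmu->start_txn(pmu);		/* perf_pmu_disable(pmu) under the hood */
            err = pmu->enable(leader);
            list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
                    if (!err)
                            err = pmu->enable(sibling);
            }
            if (!err)
                    err = pmu->commit_txn(pmu);	/* all-or-nothing check */
            if (err)
                    pmu->cancel_txn(pmu);	/* also re-enables the PMU */
            return err;				/* commit_txn() re-enabled on success */
    }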
diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c
index 9bc84a7fd901..84b1974c628f 100644
--- a/arch/powerpc/kernel/perf_event_fsl_emb.c
+++ b/arch/powerpc/kernel/perf_event_fsl_emb.c
@@ -177,7 +177,7 @@ static void fsl_emb_pmu_read(struct perf_event *event)
  * Disable all events to prevent PMU interrupts and to allow
  * events to be added or removed.
  */
-void hw_perf_disable(void)
+static void fsl_emb_pmu_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw;
 	unsigned long flags;
@@ -216,7 +216,7 @@ void hw_perf_disable(void)
  * If we were previously disabled and events were added, then
  * put the new config on the PMU.
  */
-void hw_perf_enable(void)
+static void fsl_emb_pmu_pmu_enable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw;
 	unsigned long flags;
@@ -271,7 +271,7 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
 	u64 val;
 	int i;
 
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 	cpuhw = &get_cpu_var(cpu_hw_events);
 
 	if (event->hw.config & FSL_EMB_EVENT_RESTRICTED)
@@ -311,7 +311,7 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
 	ret = 0;
 out:
 	put_cpu_var(cpu_hw_events);
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	return ret;
 }
 
@@ -321,7 +321,7 @@ static void fsl_emb_pmu_disable(struct perf_event *event)
 	struct cpu_hw_events *cpuhw;
 	int i = event->hw.idx;
 
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 	if (i < 0)
 		goto out;
 
@@ -349,7 +349,7 @@ static void fsl_emb_pmu_disable(struct perf_event *event)
 	cpuhw->n_events--;
 
 out:
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	put_cpu_var(cpu_hw_events);
 }
 
@@ -367,7 +367,7 @@ static void fsl_emb_pmu_unthrottle(struct perf_event *event)
 	if (event->hw.idx < 0 || !event->hw.sample_period)
 		return;
 	local_irq_save(flags);
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 	fsl_emb_pmu_read(event);
 	left = event->hw.sample_period;
 	event->hw.last_period = left;
@@ -378,7 +378,7 @@ static void fsl_emb_pmu_unthrottle(struct perf_event *event)
 	local64_set(&event->hw.prev_count, val);
 	local64_set(&event->hw.period_left, left);
 	perf_event_update_userpage(event);
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 }
 
@@ -524,6 +524,8 @@ static int fsl_emb_pmu_event_init(struct perf_event *event)
 }
 
 static struct pmu fsl_emb_pmu = {
+	.pmu_enable	= fsl_emb_pmu_pmu_enable,
+	.pmu_disable	= fsl_emb_pmu_pmu_disable,
 	.event_init	= fsl_emb_pmu_event_init,
 	.enable		= fsl_emb_pmu_enable,
 	.disable	= fsl_emb_pmu_disable,
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c
index d042989ceb45..4bbe19058a58 100644
--- a/arch/sh/kernel/perf_event.c
+++ b/arch/sh/kernel/perf_event.c
@@ -232,7 +232,7 @@ static int sh_pmu_enable(struct perf_event *event)
 	int idx = hwc->idx;
 	int ret = -EAGAIN;
 
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 
 	if (test_and_set_bit(idx, cpuc->used_mask)) {
 		idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events);
@@ -253,7 +253,7 @@ static int sh_pmu_enable(struct perf_event *event)
 	perf_event_update_userpage(event);
 	ret = 0;
 out:
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	return ret;
 }
 
@@ -285,7 +285,25 @@ static int sh_pmu_event_init(struct perf_event *event)
 	return err;
 }
 
+static void sh_pmu_pmu_enable(struct pmu *pmu)
+{
+	if (!sh_pmu_initialized())
+		return;
+
+	sh_pmu->enable_all();
+}
+
+static void sh_pmu_pmu_disable(struct pmu *pmu)
+{
+	if (!sh_pmu_initialized())
+		return;
+
+	sh_pmu->disable_all();
+}
+
 static struct pmu pmu = {
+	.pmu_enable	= sh_pmu_pmu_enable,
+	.pmu_disable	= sh_pmu_pmu_disable,
 	.event_init	= sh_pmu_event_init,
 	.enable		= sh_pmu_enable,
 	.disable	= sh_pmu_disable,
@@ -316,22 +334,6 @@ sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 	return NOTIFY_OK;
 }
 
-void hw_perf_enable(void)
-{
-	if (!sh_pmu_initialized())
-		return;
-
-	sh_pmu->enable_all();
-}
-
-void hw_perf_disable(void)
-{
-	if (!sh_pmu_initialized())
-		return;
-
-	sh_pmu->disable_all();
-}
-
 int __cpuinit register_sh_pmu(struct sh_pmu *pmu)
 {
 	if (sh_pmu)
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index d0131deeeaf6..37cae676536c 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -664,7 +664,7 @@ out:
 	return pcr;
 }
 
-void hw_perf_enable(void)
+static void sparc_pmu_pmu_enable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	u64 pcr;
@@ -691,7 +691,7 @@
 	pcr_ops->write(cpuc->pcr);
 }
 
-void hw_perf_disable(void)
+static void sparc_pmu_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	u64 val;
@@ -718,7 +718,7 @@ static void sparc_pmu_disable(struct perf_event *event)
 	int i;
 
 	local_irq_save(flags);
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 
 	for (i = 0; i < cpuc->n_events; i++) {
 		if (event == cpuc->event[i]) {
@@ -748,7 +748,7 @@ static void sparc_pmu_disable(struct perf_event *event)
 		}
 	}
 
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 }
 
@@ -991,7 +991,7 @@ static int sparc_pmu_enable(struct perf_event *event)
 	unsigned long flags;
 
 	local_irq_save(flags);
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 
 	n0 = cpuc->n_events;
 	if (n0 >= perf_max_events)
@@ -1020,7 +1020,7 @@ nocheck:
 
 	ret = 0;
 out:
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 	return ret;
 }
@@ -1113,7 +1113,7 @@ static void sparc_pmu_start_txn(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
-	perf_disable();
+	perf_pmu_disable(pmu);
 	cpuhw->group_flag |= PERF_EVENT_TXN;
 }
 
@@ -1127,7 +1127,7 @@ static void sparc_pmu_cancel_txn(struct pmu *pmu)
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
 	cpuhw->group_flag &= ~PERF_EVENT_TXN;
-	perf_enable();
+	perf_pmu_enable(pmu);
 }
 
 /*
@@ -1151,11 +1151,13 @@ static int sparc_pmu_commit_txn(struct pmu *pmu)
 		return -EAGAIN;
 
 	cpuc->group_flag &= ~PERF_EVENT_TXN;
-	perf_enable();
+	perf_pmu_enable(pmu);
 	return 0;
 }
 
 static struct pmu pmu = {
+	.pmu_enable	= sparc_pmu_pmu_enable,
+	.pmu_disable	= sparc_pmu_pmu_disable,
 	.event_init	= sparc_pmu_event_init,
 	.enable		= sparc_pmu_enable,
 	.disable	= sparc_pmu_disable,
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 846070ce49c3..79705ac45019 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -583,7 +583,7 @@ static void x86_pmu_disable_all(void)
 	}
 }
 
-void hw_perf_disable(void)
+static void x86_pmu_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -803,7 +803,7 @@ static inline int match_prev_assignment(struct hw_perf_event *hwc,
 static int x86_pmu_start(struct perf_event *event);
 static void x86_pmu_stop(struct perf_event *event);
 
-void hw_perf_enable(void)
+static void x86_pmu_pmu_enable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct perf_event *event;
@@ -969,7 +969,7 @@ static int x86_pmu_enable(struct perf_event *event)
 
 	hwc = &event->hw;
 
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 	n0 = cpuc->n_events;
 	ret = n = collect_events(cpuc, event, false);
 	if (ret < 0)
@@ -999,7 +999,7 @@ done_collect:
 
 	ret = 0;
 out:
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	return ret;
 }
 
@@ -1436,7 +1436,7 @@ static void x86_pmu_start_txn(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
-	perf_disable();
+	perf_pmu_disable(pmu);
 	cpuc->group_flag |= PERF_EVENT_TXN;
 	cpuc->n_txn = 0;
 }
@@ -1456,7 +1456,7 @@ static void x86_pmu_cancel_txn(struct pmu *pmu)
 	 */
 	cpuc->n_added -= cpuc->n_txn;
 	cpuc->n_events -= cpuc->n_txn;
-	perf_enable();
+	perf_pmu_enable(pmu);
 }
 
 /*
@@ -1486,7 +1486,7 @@ static int x86_pmu_commit_txn(struct pmu *pmu)
 	memcpy(cpuc->assign, assign, n*sizeof(int));
 
 	cpuc->group_flag &= ~PERF_EVENT_TXN;
-	perf_enable();
+	perf_pmu_enable(pmu);
 	return 0;
 }
 
@@ -1605,6 +1605,8 @@ int x86_pmu_event_init(struct perf_event *event)
 }
 
 static struct pmu pmu = {
+	.pmu_enable	= x86_pmu_pmu_enable,
+	.pmu_disable	= x86_pmu_pmu_disable,
	.event_init	= x86_pmu_event_init,
 	.enable		= x86_pmu_enable,
 	.disable	= x86_pmu_disable,
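One property the x86 hunks rely on: perf_pmu_disable()/perf_pmu_enable() must nest, because x86_pmu_enable() brackets itself with the pair even when it runs inside a transaction that has already disabled the PMU. Assuming the counting wrappers sketched at the top of this page, the sequence works out as:

    x86_pmu_start_txn()   -> perf_pmu_disable(pmu)   count 0 -> 1, hardware stopped
      x86_pmu_enable()    -> perf_pmu_disable(pmu)   count 1 -> 2, no-op
                          -> perf_pmu_enable(pmu)    count 2 -> 1, no-op
    x86_pmu_commit_txn()  -> perf_pmu_enable(pmu)    count 1 -> 0, hardware restarted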