Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/kernel/module.c                   6
-rw-r--r--  arch/powerpc/kernel/perf_callchain.c          86
-rw-r--r--  arch/powerpc/kernel/perf_event.c             164
-rw-r--r--  arch/powerpc/kernel/perf_event_fsl_emb.c     148
-rw-r--r--  arch/powerpc/platforms/512x/clock.c            2
-rw-r--r--  arch/powerpc/platforms/52xx/efika.c            9
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_common.c   8
7 files changed, 229 insertions, 194 deletions
diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
index 477c663e0140..49cee9df225b 100644
--- a/arch/powerpc/kernel/module.c
+++ b/arch/powerpc/kernel/module.c
| @@ -63,11 +63,6 @@ int module_finalize(const Elf_Ehdr *hdr, | |||
| 63 | const Elf_Shdr *sechdrs, struct module *me) | 63 | const Elf_Shdr *sechdrs, struct module *me) |
| 64 | { | 64 | { |
| 65 | const Elf_Shdr *sect; | 65 | const Elf_Shdr *sect; |
| 66 | int err; | ||
| 67 | |||
| 68 | err = module_bug_finalize(hdr, sechdrs, me); | ||
| 69 | if (err) | ||
| 70 | return err; | ||
| 71 | 66 | ||
| 72 | /* Apply feature fixups */ | 67 | /* Apply feature fixups */ |
| 73 | sect = find_section(hdr, sechdrs, "__ftr_fixup"); | 68 | sect = find_section(hdr, sechdrs, "__ftr_fixup"); |
| @@ -101,5 +96,4 @@ int module_finalize(const Elf_Ehdr *hdr, | |||
| 101 | 96 | ||
| 102 | void module_arch_cleanup(struct module *mod) | 97 | void module_arch_cleanup(struct module *mod) |
| 103 | { | 98 | { |
| 104 | module_bug_cleanup(mod); | ||
| 105 | } | 99 | } |
diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/kernel/perf_callchain.c
index 95ad9dad298e..d05ae4204bbf 100644
--- a/arch/powerpc/kernel/perf_callchain.c
+++ b/arch/powerpc/kernel/perf_callchain.c
| @@ -23,18 +23,6 @@ | |||
| 23 | #include "ppc32.h" | 23 | #include "ppc32.h" |
| 24 | #endif | 24 | #endif |
| 25 | 25 | ||
| 26 | /* | ||
| 27 | * Store another value in a callchain_entry. | ||
| 28 | */ | ||
| 29 | static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip) | ||
| 30 | { | ||
| 31 | unsigned int nr = entry->nr; | ||
| 32 | |||
| 33 | if (nr < PERF_MAX_STACK_DEPTH) { | ||
| 34 | entry->ip[nr] = ip; | ||
| 35 | entry->nr = nr + 1; | ||
| 36 | } | ||
| 37 | } | ||
| 38 | 26 | ||
| 39 | /* | 27 | /* |
| 40 | * Is sp valid as the address of the next kernel stack frame after prev_sp? | 28 | * Is sp valid as the address of the next kernel stack frame after prev_sp? |
| @@ -58,8 +46,8 @@ static int valid_next_sp(unsigned long sp, unsigned long prev_sp) | |||
| 58 | return 0; | 46 | return 0; |
| 59 | } | 47 | } |
| 60 | 48 | ||
| 61 | static void perf_callchain_kernel(struct pt_regs *regs, | 49 | void |
| 62 | struct perf_callchain_entry *entry) | 50 | perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) |
| 63 | { | 51 | { |
| 64 | unsigned long sp, next_sp; | 52 | unsigned long sp, next_sp; |
| 65 | unsigned long next_ip; | 53 | unsigned long next_ip; |
| @@ -69,8 +57,7 @@ static void perf_callchain_kernel(struct pt_regs *regs, | |||
| 69 | 57 | ||
| 70 | lr = regs->link; | 58 | lr = regs->link; |
| 71 | sp = regs->gpr[1]; | 59 | sp = regs->gpr[1]; |
| 72 | callchain_store(entry, PERF_CONTEXT_KERNEL); | 60 | perf_callchain_store(entry, regs->nip); |
| 73 | callchain_store(entry, regs->nip); | ||
| 74 | 61 | ||
| 75 | if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD)) | 62 | if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD)) |
| 76 | return; | 63 | return; |
| @@ -89,7 +76,7 @@ static void perf_callchain_kernel(struct pt_regs *regs, | |||
| 89 | next_ip = regs->nip; | 76 | next_ip = regs->nip; |
| 90 | lr = regs->link; | 77 | lr = regs->link; |
| 91 | level = 0; | 78 | level = 0; |
| 92 | callchain_store(entry, PERF_CONTEXT_KERNEL); | 79 | perf_callchain_store(entry, PERF_CONTEXT_KERNEL); |
| 93 | 80 | ||
| 94 | } else { | 81 | } else { |
| 95 | if (level == 0) | 82 | if (level == 0) |
| @@ -111,7 +98,7 @@ static void perf_callchain_kernel(struct pt_regs *regs, | |||
| 111 | ++level; | 98 | ++level; |
| 112 | } | 99 | } |
| 113 | 100 | ||
| 114 | callchain_store(entry, next_ip); | 101 | perf_callchain_store(entry, next_ip); |
| 115 | if (!valid_next_sp(next_sp, sp)) | 102 | if (!valid_next_sp(next_sp, sp)) |
| 116 | return; | 103 | return; |
| 117 | sp = next_sp; | 104 | sp = next_sp; |
| @@ -233,8 +220,8 @@ static int sane_signal_64_frame(unsigned long sp) | |||
| 233 | puc == (unsigned long) &sf->uc; | 220 | puc == (unsigned long) &sf->uc; |
| 234 | } | 221 | } |
| 235 | 222 | ||
| 236 | static void perf_callchain_user_64(struct pt_regs *regs, | 223 | static void perf_callchain_user_64(struct perf_callchain_entry *entry, |
| 237 | struct perf_callchain_entry *entry) | 224 | struct pt_regs *regs) |
| 238 | { | 225 | { |
| 239 | unsigned long sp, next_sp; | 226 | unsigned long sp, next_sp; |
| 240 | unsigned long next_ip; | 227 | unsigned long next_ip; |
| @@ -246,8 +233,7 @@ static void perf_callchain_user_64(struct pt_regs *regs, | |||
| 246 | next_ip = regs->nip; | 233 | next_ip = regs->nip; |
| 247 | lr = regs->link; | 234 | lr = regs->link; |
| 248 | sp = regs->gpr[1]; | 235 | sp = regs->gpr[1]; |
| 249 | callchain_store(entry, PERF_CONTEXT_USER); | 236 | perf_callchain_store(entry, next_ip); |
| 250 | callchain_store(entry, next_ip); | ||
| 251 | 237 | ||
| 252 | for (;;) { | 238 | for (;;) { |
| 253 | fp = (unsigned long __user *) sp; | 239 | fp = (unsigned long __user *) sp; |
| @@ -276,14 +262,14 @@ static void perf_callchain_user_64(struct pt_regs *regs, | |||
| 276 | read_user_stack_64(&uregs[PT_R1], &sp)) | 262 | read_user_stack_64(&uregs[PT_R1], &sp)) |
| 277 | return; | 263 | return; |
| 278 | level = 0; | 264 | level = 0; |
| 279 | callchain_store(entry, PERF_CONTEXT_USER); | 265 | perf_callchain_store(entry, PERF_CONTEXT_USER); |
| 280 | callchain_store(entry, next_ip); | 266 | perf_callchain_store(entry, next_ip); |
| 281 | continue; | 267 | continue; |
| 282 | } | 268 | } |
| 283 | 269 | ||
| 284 | if (level == 0) | 270 | if (level == 0) |
| 285 | next_ip = lr; | 271 | next_ip = lr; |
| 286 | callchain_store(entry, next_ip); | 272 | perf_callchain_store(entry, next_ip); |
| 287 | ++level; | 273 | ++level; |
| 288 | sp = next_sp; | 274 | sp = next_sp; |
| 289 | } | 275 | } |
| @@ -315,8 +301,8 @@ static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret) | |||
| 315 | return __get_user_inatomic(*ret, ptr); | 301 | return __get_user_inatomic(*ret, ptr); |
| 316 | } | 302 | } |
| 317 | 303 | ||
| 318 | static inline void perf_callchain_user_64(struct pt_regs *regs, | 304 | static inline void perf_callchain_user_64(struct perf_callchain_entry *entry, |
| 319 | struct perf_callchain_entry *entry) | 305 | struct pt_regs *regs) |
| 320 | { | 306 | { |
| 321 | } | 307 | } |
| 322 | 308 | ||
| @@ -435,8 +421,8 @@ static unsigned int __user *signal_frame_32_regs(unsigned int sp, | |||
| 435 | return mctx->mc_gregs; | 421 | return mctx->mc_gregs; |
| 436 | } | 422 | } |
| 437 | 423 | ||
| 438 | static void perf_callchain_user_32(struct pt_regs *regs, | 424 | static void perf_callchain_user_32(struct perf_callchain_entry *entry, |
| 439 | struct perf_callchain_entry *entry) | 425 | struct pt_regs *regs) |
| 440 | { | 426 | { |
| 441 | unsigned int sp, next_sp; | 427 | unsigned int sp, next_sp; |
| 442 | unsigned int next_ip; | 428 | unsigned int next_ip; |
| @@ -447,8 +433,7 @@ static void perf_callchain_user_32(struct pt_regs *regs, | |||
| 447 | next_ip = regs->nip; | 433 | next_ip = regs->nip; |
| 448 | lr = regs->link; | 434 | lr = regs->link; |
| 449 | sp = regs->gpr[1]; | 435 | sp = regs->gpr[1]; |
| 450 | callchain_store(entry, PERF_CONTEXT_USER); | 436 | perf_callchain_store(entry, next_ip); |
| 451 | callchain_store(entry, next_ip); | ||
| 452 | 437 | ||
| 453 | while (entry->nr < PERF_MAX_STACK_DEPTH) { | 438 | while (entry->nr < PERF_MAX_STACK_DEPTH) { |
| 454 | fp = (unsigned int __user *) (unsigned long) sp; | 439 | fp = (unsigned int __user *) (unsigned long) sp; |
| @@ -470,45 +455,24 @@ static void perf_callchain_user_32(struct pt_regs *regs, | |||
| 470 | read_user_stack_32(&uregs[PT_R1], &sp)) | 455 | read_user_stack_32(&uregs[PT_R1], &sp)) |
| 471 | return; | 456 | return; |
| 472 | level = 0; | 457 | level = 0; |
| 473 | callchain_store(entry, PERF_CONTEXT_USER); | 458 | perf_callchain_store(entry, PERF_CONTEXT_USER); |
| 474 | callchain_store(entry, next_ip); | 459 | perf_callchain_store(entry, next_ip); |
| 475 | continue; | 460 | continue; |
| 476 | } | 461 | } |
| 477 | 462 | ||
| 478 | if (level == 0) | 463 | if (level == 0) |
| 479 | next_ip = lr; | 464 | next_ip = lr; |
| 480 | callchain_store(entry, next_ip); | 465 | perf_callchain_store(entry, next_ip); |
| 481 | ++level; | 466 | ++level; |
| 482 | sp = next_sp; | 467 | sp = next_sp; |
| 483 | } | 468 | } |
| 484 | } | 469 | } |
| 485 | 470 | ||
| 486 | /* | 471 | void |
| 487 | * Since we can't get PMU interrupts inside a PMU interrupt handler, | 472 | perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) |
| 488 | * we don't need separate irq and nmi entries here. | ||
| 489 | */ | ||
| 490 | static DEFINE_PER_CPU(struct perf_callchain_entry, cpu_perf_callchain); | ||
| 491 | |||
| 492 | struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) | ||
| 493 | { | 473 | { |
| 494 | struct perf_callchain_entry *entry = &__get_cpu_var(cpu_perf_callchain); | 474 | if (current_is_64bit()) |
| 495 | 475 | perf_callchain_user_64(entry, regs); | |
| 496 | entry->nr = 0; | 476 | else |
| 497 | 477 | perf_callchain_user_32(entry, regs); | |
| 498 | if (!user_mode(regs)) { | ||
| 499 | perf_callchain_kernel(regs, entry); | ||
| 500 | if (current->mm) | ||
| 501 | regs = task_pt_regs(current); | ||
| 502 | else | ||
| 503 | regs = NULL; | ||
| 504 | } | ||
| 505 | |||
| 506 | if (regs) { | ||
| 507 | if (current_is_64bit()) | ||
| 508 | perf_callchain_user_64(regs, entry); | ||
| 509 | else | ||
| 510 | perf_callchain_user_32(regs, entry); | ||
| 511 | } | ||
| 512 | |||
| 513 | return entry; | ||
| 514 | } | 478 | } |
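
Note: the perf_callchain.c hunks above drop the local callchain_store() helper and the arch-level perf_callchain() entry point. The generic perf core now owns the per-cpu callchain buffer, records the initial PERF_CONTEXT_KERNEL/PERF_CONTEXT_USER marker, and calls the arch's perf_callchain_kernel()/perf_callchain_user() directly, which is why only the inner context-switch markers survive in the arch code. The store helper supplied by the core behaves like the helper deleted above; a sketch of that contract, reconstructed from the removed code:

    /* Equivalent of the deleted callchain_store(), provided by generic code
     * under the name perf_callchain_store(): append one instruction pointer,
     * silently dropping entries past the depth limit. */
    static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
    {
            if (entry->nr < PERF_MAX_STACK_DEPTH)
                    entry->ip[entry->nr++] = ip;
    }
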
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index d301a30445e0..9cb4924b6c07 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
| @@ -402,6 +402,9 @@ static void power_pmu_read(struct perf_event *event) | |||
| 402 | { | 402 | { |
| 403 | s64 val, delta, prev; | 403 | s64 val, delta, prev; |
| 404 | 404 | ||
| 405 | if (event->hw.state & PERF_HES_STOPPED) | ||
| 406 | return; | ||
| 407 | |||
| 405 | if (!event->hw.idx) | 408 | if (!event->hw.idx) |
| 406 | return; | 409 | return; |
| 407 | /* | 410 | /* |
| @@ -517,7 +520,7 @@ static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0) | |||
| 517 | * Disable all events to prevent PMU interrupts and to allow | 520 | * Disable all events to prevent PMU interrupts and to allow |
| 518 | * events to be added or removed. | 521 | * events to be added or removed. |
| 519 | */ | 522 | */ |
| 520 | void hw_perf_disable(void) | 523 | static void power_pmu_disable(struct pmu *pmu) |
| 521 | { | 524 | { |
| 522 | struct cpu_hw_events *cpuhw; | 525 | struct cpu_hw_events *cpuhw; |
| 523 | unsigned long flags; | 526 | unsigned long flags; |
| @@ -565,7 +568,7 @@ void hw_perf_disable(void) | |||
| 565 | * If we were previously disabled and events were added, then | 568 | * If we were previously disabled and events were added, then |
| 566 | * put the new config on the PMU. | 569 | * put the new config on the PMU. |
| 567 | */ | 570 | */ |
| 568 | void hw_perf_enable(void) | 571 | static void power_pmu_enable(struct pmu *pmu) |
| 569 | { | 572 | { |
| 570 | struct perf_event *event; | 573 | struct perf_event *event; |
| 571 | struct cpu_hw_events *cpuhw; | 574 | struct cpu_hw_events *cpuhw; |
| @@ -672,6 +675,8 @@ void hw_perf_enable(void) | |||
| 672 | } | 675 | } |
| 673 | local64_set(&event->hw.prev_count, val); | 676 | local64_set(&event->hw.prev_count, val); |
| 674 | event->hw.idx = idx; | 677 | event->hw.idx = idx; |
| 678 | if (event->hw.state & PERF_HES_STOPPED) | ||
| 679 | val = 0; | ||
| 675 | write_pmc(idx, val); | 680 | write_pmc(idx, val); |
| 676 | perf_event_update_userpage(event); | 681 | perf_event_update_userpage(event); |
| 677 | } | 682 | } |
| @@ -727,7 +732,7 @@ static int collect_events(struct perf_event *group, int max_count, | |||
| 727 | * re-enable the PMU in order to get hw_perf_enable to do the | 732 | * re-enable the PMU in order to get hw_perf_enable to do the |
| 728 | * actual work of reconfiguring the PMU. | 733 | * actual work of reconfiguring the PMU. |
| 729 | */ | 734 | */ |
| 730 | static int power_pmu_enable(struct perf_event *event) | 735 | static int power_pmu_add(struct perf_event *event, int ef_flags) |
| 731 | { | 736 | { |
| 732 | struct cpu_hw_events *cpuhw; | 737 | struct cpu_hw_events *cpuhw; |
| 733 | unsigned long flags; | 738 | unsigned long flags; |
| @@ -735,7 +740,7 @@ static int power_pmu_enable(struct perf_event *event) | |||
| 735 | int ret = -EAGAIN; | 740 | int ret = -EAGAIN; |
| 736 | 741 | ||
| 737 | local_irq_save(flags); | 742 | local_irq_save(flags); |
| 738 | perf_disable(); | 743 | perf_pmu_disable(event->pmu); |
| 739 | 744 | ||
| 740 | /* | 745 | /* |
| 741 | * Add the event to the list (if there is room) | 746 | * Add the event to the list (if there is room) |
| @@ -749,6 +754,9 @@ static int power_pmu_enable(struct perf_event *event) | |||
| 749 | cpuhw->events[n0] = event->hw.config; | 754 | cpuhw->events[n0] = event->hw.config; |
| 750 | cpuhw->flags[n0] = event->hw.event_base; | 755 | cpuhw->flags[n0] = event->hw.event_base; |
| 751 | 756 | ||
| 757 | if (!(ef_flags & PERF_EF_START)) | ||
| 758 | event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE; | ||
| 759 | |||
| 752 | /* | 760 | /* |
| 753 | * If group events scheduling transaction was started, | 761 | * If group events scheduling transaction was started, |
| 754 | * skip the schedulability test here, it will be peformed | 762 | * skip the schedulability test here, it will be peformed |
| @@ -769,7 +777,7 @@ nocheck: | |||
| 769 | 777 | ||
| 770 | ret = 0; | 778 | ret = 0; |
| 771 | out: | 779 | out: |
| 772 | perf_enable(); | 780 | perf_pmu_enable(event->pmu); |
| 773 | local_irq_restore(flags); | 781 | local_irq_restore(flags); |
| 774 | return ret; | 782 | return ret; |
| 775 | } | 783 | } |
| @@ -777,14 +785,14 @@ nocheck: | |||
| 777 | /* | 785 | /* |
| 778 | * Remove a event from the PMU. | 786 | * Remove a event from the PMU. |
| 779 | */ | 787 | */ |
| 780 | static void power_pmu_disable(struct perf_event *event) | 788 | static void power_pmu_del(struct perf_event *event, int ef_flags) |
| 781 | { | 789 | { |
| 782 | struct cpu_hw_events *cpuhw; | 790 | struct cpu_hw_events *cpuhw; |
| 783 | long i; | 791 | long i; |
| 784 | unsigned long flags; | 792 | unsigned long flags; |
| 785 | 793 | ||
| 786 | local_irq_save(flags); | 794 | local_irq_save(flags); |
| 787 | perf_disable(); | 795 | perf_pmu_disable(event->pmu); |
| 788 | 796 | ||
| 789 | power_pmu_read(event); | 797 | power_pmu_read(event); |
| 790 | 798 | ||
| @@ -821,34 +829,60 @@ static void power_pmu_disable(struct perf_event *event) | |||
| 821 | cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE); | 829 | cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE); |
| 822 | } | 830 | } |
| 823 | 831 | ||
| 824 | perf_enable(); | 832 | perf_pmu_enable(event->pmu); |
| 825 | local_irq_restore(flags); | 833 | local_irq_restore(flags); |
| 826 | } | 834 | } |
| 827 | 835 | ||
| 828 | /* | 836 | /* |
| 829 | * Re-enable interrupts on a event after they were throttled | 837 | * POWER-PMU does not support disabling individual counters, hence |
| 830 | * because they were coming too fast. | 838 | * program their cycle counter to their max value and ignore the interrupts. |
| 831 | */ | 839 | */ |
| 832 | static void power_pmu_unthrottle(struct perf_event *event) | 840 | |
| 841 | static void power_pmu_start(struct perf_event *event, int ef_flags) | ||
| 842 | { | ||
| 843 | unsigned long flags; | ||
| 844 | s64 left; | ||
| 845 | |||
| 846 | if (!event->hw.idx || !event->hw.sample_period) | ||
| 847 | return; | ||
| 848 | |||
| 849 | if (!(event->hw.state & PERF_HES_STOPPED)) | ||
| 850 | return; | ||
| 851 | |||
| 852 | if (ef_flags & PERF_EF_RELOAD) | ||
| 853 | WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); | ||
| 854 | |||
| 855 | local_irq_save(flags); | ||
| 856 | perf_pmu_disable(event->pmu); | ||
| 857 | |||
| 858 | event->hw.state = 0; | ||
| 859 | left = local64_read(&event->hw.period_left); | ||
| 860 | write_pmc(event->hw.idx, left); | ||
| 861 | |||
| 862 | perf_event_update_userpage(event); | ||
| 863 | perf_pmu_enable(event->pmu); | ||
| 864 | local_irq_restore(flags); | ||
| 865 | } | ||
| 866 | |||
| 867 | static void power_pmu_stop(struct perf_event *event, int ef_flags) | ||
| 833 | { | 868 | { |
| 834 | s64 val, left; | ||
| 835 | unsigned long flags; | 869 | unsigned long flags; |
| 836 | 870 | ||
| 837 | if (!event->hw.idx || !event->hw.sample_period) | 871 | if (!event->hw.idx || !event->hw.sample_period) |
| 838 | return; | 872 | return; |
| 873 | |||
| 874 | if (event->hw.state & PERF_HES_STOPPED) | ||
| 875 | return; | ||
| 876 | |||
| 839 | local_irq_save(flags); | 877 | local_irq_save(flags); |
| 840 | perf_disable(); | 878 | perf_pmu_disable(event->pmu); |
| 879 | |||
| 841 | power_pmu_read(event); | 880 | power_pmu_read(event); |
| 842 | left = event->hw.sample_period; | 881 | event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; |
| 843 | event->hw.last_period = left; | 882 | write_pmc(event->hw.idx, 0); |
| 844 | val = 0; | 883 | |
| 845 | if (left < 0x80000000L) | ||
| 846 | val = 0x80000000L - left; | ||
| 847 | write_pmc(event->hw.idx, val); | ||
| 848 | local64_set(&event->hw.prev_count, val); | ||
| 849 | local64_set(&event->hw.period_left, left); | ||
| 850 | perf_event_update_userpage(event); | 884 | perf_event_update_userpage(event); |
| 851 | perf_enable(); | 885 | perf_pmu_enable(event->pmu); |
| 852 | local_irq_restore(flags); | 886 | local_irq_restore(flags); |
| 853 | } | 887 | } |
| 854 | 888 | ||
| @@ -857,10 +891,11 @@ static void power_pmu_unthrottle(struct perf_event *event) | |||
| 857 | * Set the flag to make pmu::enable() not perform the | 891 | * Set the flag to make pmu::enable() not perform the |
| 858 | * schedulability test, it will be performed at commit time | 892 | * schedulability test, it will be performed at commit time |
| 859 | */ | 893 | */ |
| 860 | void power_pmu_start_txn(const struct pmu *pmu) | 894 | void power_pmu_start_txn(struct pmu *pmu) |
| 861 | { | 895 | { |
| 862 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | 896 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); |
| 863 | 897 | ||
| 898 | perf_pmu_disable(pmu); | ||
| 864 | cpuhw->group_flag |= PERF_EVENT_TXN; | 899 | cpuhw->group_flag |= PERF_EVENT_TXN; |
| 865 | cpuhw->n_txn_start = cpuhw->n_events; | 900 | cpuhw->n_txn_start = cpuhw->n_events; |
| 866 | } | 901 | } |
| @@ -870,11 +905,12 @@ void power_pmu_start_txn(const struct pmu *pmu) | |||
| 870 | * Clear the flag and pmu::enable() will perform the | 905 | * Clear the flag and pmu::enable() will perform the |
| 871 | * schedulability test. | 906 | * schedulability test. |
| 872 | */ | 907 | */ |
| 873 | void power_pmu_cancel_txn(const struct pmu *pmu) | 908 | void power_pmu_cancel_txn(struct pmu *pmu) |
| 874 | { | 909 | { |
| 875 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | 910 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); |
| 876 | 911 | ||
| 877 | cpuhw->group_flag &= ~PERF_EVENT_TXN; | 912 | cpuhw->group_flag &= ~PERF_EVENT_TXN; |
| 913 | perf_pmu_enable(pmu); | ||
| 878 | } | 914 | } |
| 879 | 915 | ||
| 880 | /* | 916 | /* |
| @@ -882,7 +918,7 @@ void power_pmu_cancel_txn(const struct pmu *pmu) | |||
| 882 | * Perform the group schedulability test as a whole | 918 | * Perform the group schedulability test as a whole |
| 883 | * Return 0 if success | 919 | * Return 0 if success |
| 884 | */ | 920 | */ |
| 885 | int power_pmu_commit_txn(const struct pmu *pmu) | 921 | int power_pmu_commit_txn(struct pmu *pmu) |
| 886 | { | 922 | { |
| 887 | struct cpu_hw_events *cpuhw; | 923 | struct cpu_hw_events *cpuhw; |
| 888 | long i, n; | 924 | long i, n; |
| @@ -901,19 +937,10 @@ int power_pmu_commit_txn(const struct pmu *pmu) | |||
| 901 | cpuhw->event[i]->hw.config = cpuhw->events[i]; | 937 | cpuhw->event[i]->hw.config = cpuhw->events[i]; |
| 902 | 938 | ||
| 903 | cpuhw->group_flag &= ~PERF_EVENT_TXN; | 939 | cpuhw->group_flag &= ~PERF_EVENT_TXN; |
| 940 | perf_pmu_enable(pmu); | ||
| 904 | return 0; | 941 | return 0; |
| 905 | } | 942 | } |
| 906 | 943 | ||
| 907 | struct pmu power_pmu = { | ||
| 908 | .enable = power_pmu_enable, | ||
| 909 | .disable = power_pmu_disable, | ||
| 910 | .read = power_pmu_read, | ||
| 911 | .unthrottle = power_pmu_unthrottle, | ||
| 912 | .start_txn = power_pmu_start_txn, | ||
| 913 | .cancel_txn = power_pmu_cancel_txn, | ||
| 914 | .commit_txn = power_pmu_commit_txn, | ||
| 915 | }; | ||
| 916 | |||
| 917 | /* | 944 | /* |
| 918 | * Return 1 if we might be able to put event on a limited PMC, | 945 | * Return 1 if we might be able to put event on a limited PMC, |
| 919 | * or 0 if not. | 946 | * or 0 if not. |
| @@ -1014,7 +1041,7 @@ static int hw_perf_cache_event(u64 config, u64 *eventp) | |||
| 1014 | return 0; | 1041 | return 0; |
| 1015 | } | 1042 | } |
| 1016 | 1043 | ||
| 1017 | const struct pmu *hw_perf_event_init(struct perf_event *event) | 1044 | static int power_pmu_event_init(struct perf_event *event) |
| 1018 | { | 1045 | { |
| 1019 | u64 ev; | 1046 | u64 ev; |
| 1020 | unsigned long flags; | 1047 | unsigned long flags; |
| @@ -1026,25 +1053,27 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
| 1026 | struct cpu_hw_events *cpuhw; | 1053 | struct cpu_hw_events *cpuhw; |
| 1027 | 1054 | ||
| 1028 | if (!ppmu) | 1055 | if (!ppmu) |
| 1029 | return ERR_PTR(-ENXIO); | 1056 | return -ENOENT; |
| 1057 | |||
| 1030 | switch (event->attr.type) { | 1058 | switch (event->attr.type) { |
| 1031 | case PERF_TYPE_HARDWARE: | 1059 | case PERF_TYPE_HARDWARE: |
| 1032 | ev = event->attr.config; | 1060 | ev = event->attr.config; |
| 1033 | if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) | 1061 | if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) |
| 1034 | return ERR_PTR(-EOPNOTSUPP); | 1062 | return -EOPNOTSUPP; |
| 1035 | ev = ppmu->generic_events[ev]; | 1063 | ev = ppmu->generic_events[ev]; |
| 1036 | break; | 1064 | break; |
| 1037 | case PERF_TYPE_HW_CACHE: | 1065 | case PERF_TYPE_HW_CACHE: |
| 1038 | err = hw_perf_cache_event(event->attr.config, &ev); | 1066 | err = hw_perf_cache_event(event->attr.config, &ev); |
| 1039 | if (err) | 1067 | if (err) |
| 1040 | return ERR_PTR(err); | 1068 | return err; |
| 1041 | break; | 1069 | break; |
| 1042 | case PERF_TYPE_RAW: | 1070 | case PERF_TYPE_RAW: |
| 1043 | ev = event->attr.config; | 1071 | ev = event->attr.config; |
| 1044 | break; | 1072 | break; |
| 1045 | default: | 1073 | default: |
| 1046 | return ERR_PTR(-EINVAL); | 1074 | return -ENOENT; |
| 1047 | } | 1075 | } |
| 1076 | |||
| 1048 | event->hw.config_base = ev; | 1077 | event->hw.config_base = ev; |
| 1049 | event->hw.idx = 0; | 1078 | event->hw.idx = 0; |
| 1050 | 1079 | ||
| @@ -1081,7 +1110,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
| 1081 | */ | 1110 | */ |
| 1082 | ev = normal_pmc_alternative(ev, flags); | 1111 | ev = normal_pmc_alternative(ev, flags); |
| 1083 | if (!ev) | 1112 | if (!ev) |
| 1084 | return ERR_PTR(-EINVAL); | 1113 | return -EINVAL; |
| 1085 | } | 1114 | } |
| 1086 | } | 1115 | } |
| 1087 | 1116 | ||
| @@ -1095,19 +1124,19 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
| 1095 | n = collect_events(event->group_leader, ppmu->n_counter - 1, | 1124 | n = collect_events(event->group_leader, ppmu->n_counter - 1, |
| 1096 | ctrs, events, cflags); | 1125 | ctrs, events, cflags); |
| 1097 | if (n < 0) | 1126 | if (n < 0) |
| 1098 | return ERR_PTR(-EINVAL); | 1127 | return -EINVAL; |
| 1099 | } | 1128 | } |
| 1100 | events[n] = ev; | 1129 | events[n] = ev; |
| 1101 | ctrs[n] = event; | 1130 | ctrs[n] = event; |
| 1102 | cflags[n] = flags; | 1131 | cflags[n] = flags; |
| 1103 | if (check_excludes(ctrs, cflags, n, 1)) | 1132 | if (check_excludes(ctrs, cflags, n, 1)) |
| 1104 | return ERR_PTR(-EINVAL); | 1133 | return -EINVAL; |
| 1105 | 1134 | ||
| 1106 | cpuhw = &get_cpu_var(cpu_hw_events); | 1135 | cpuhw = &get_cpu_var(cpu_hw_events); |
| 1107 | err = power_check_constraints(cpuhw, events, cflags, n + 1); | 1136 | err = power_check_constraints(cpuhw, events, cflags, n + 1); |
| 1108 | put_cpu_var(cpu_hw_events); | 1137 | put_cpu_var(cpu_hw_events); |
| 1109 | if (err) | 1138 | if (err) |
| 1110 | return ERR_PTR(-EINVAL); | 1139 | return -EINVAL; |
| 1111 | 1140 | ||
| 1112 | event->hw.config = events[n]; | 1141 | event->hw.config = events[n]; |
| 1113 | event->hw.event_base = cflags[n]; | 1142 | event->hw.event_base = cflags[n]; |
| @@ -1132,11 +1161,23 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
| 1132 | } | 1161 | } |
| 1133 | event->destroy = hw_perf_event_destroy; | 1162 | event->destroy = hw_perf_event_destroy; |
| 1134 | 1163 | ||
| 1135 | if (err) | 1164 | return err; |
| 1136 | return ERR_PTR(err); | ||
| 1137 | return &power_pmu; | ||
| 1138 | } | 1165 | } |
| 1139 | 1166 | ||
| 1167 | struct pmu power_pmu = { | ||
| 1168 | .pmu_enable = power_pmu_enable, | ||
| 1169 | .pmu_disable = power_pmu_disable, | ||
| 1170 | .event_init = power_pmu_event_init, | ||
| 1171 | .add = power_pmu_add, | ||
| 1172 | .del = power_pmu_del, | ||
| 1173 | .start = power_pmu_start, | ||
| 1174 | .stop = power_pmu_stop, | ||
| 1175 | .read = power_pmu_read, | ||
| 1176 | .start_txn = power_pmu_start_txn, | ||
| 1177 | .cancel_txn = power_pmu_cancel_txn, | ||
| 1178 | .commit_txn = power_pmu_commit_txn, | ||
| 1179 | }; | ||
| 1180 | |||
| 1140 | /* | 1181 | /* |
| 1141 | * A counter has overflowed; update its count and record | 1182 | * A counter has overflowed; update its count and record |
| 1142 | * things if requested. Note that interrupts are hard-disabled | 1183 | * things if requested. Note that interrupts are hard-disabled |
| @@ -1149,6 +1190,11 @@ static void record_and_restart(struct perf_event *event, unsigned long val, | |||
| 1149 | s64 prev, delta, left; | 1190 | s64 prev, delta, left; |
| 1150 | int record = 0; | 1191 | int record = 0; |
| 1151 | 1192 | ||
| 1193 | if (event->hw.state & PERF_HES_STOPPED) { | ||
| 1194 | write_pmc(event->hw.idx, 0); | ||
| 1195 | return; | ||
| 1196 | } | ||
| 1197 | |||
| 1152 | /* we don't have to worry about interrupts here */ | 1198 | /* we don't have to worry about interrupts here */ |
| 1153 | prev = local64_read(&event->hw.prev_count); | 1199 | prev = local64_read(&event->hw.prev_count); |
| 1154 | delta = (val - prev) & 0xfffffffful; | 1200 | delta = (val - prev) & 0xfffffffful; |
| @@ -1171,6 +1217,11 @@ static void record_and_restart(struct perf_event *event, unsigned long val, | |||
| 1171 | val = 0x80000000LL - left; | 1217 | val = 0x80000000LL - left; |
| 1172 | } | 1218 | } |
| 1173 | 1219 | ||
| 1220 | write_pmc(event->hw.idx, val); | ||
| 1221 | local64_set(&event->hw.prev_count, val); | ||
| 1222 | local64_set(&event->hw.period_left, left); | ||
| 1223 | perf_event_update_userpage(event); | ||
| 1224 | |||
| 1174 | /* | 1225 | /* |
| 1175 | * Finally record data if requested. | 1226 | * Finally record data if requested. |
| 1176 | */ | 1227 | */ |
| @@ -1183,23 +1234,9 @@ static void record_and_restart(struct perf_event *event, unsigned long val, | |||
| 1183 | if (event->attr.sample_type & PERF_SAMPLE_ADDR) | 1234 | if (event->attr.sample_type & PERF_SAMPLE_ADDR) |
| 1184 | perf_get_data_addr(regs, &data.addr); | 1235 | perf_get_data_addr(regs, &data.addr); |
| 1185 | 1236 | ||
| 1186 | if (perf_event_overflow(event, nmi, &data, regs)) { | 1237 | if (perf_event_overflow(event, nmi, &data, regs)) |
| 1187 | /* | 1238 | power_pmu_stop(event, 0); |
| 1188 | * Interrupts are coming too fast - throttle them | ||
| 1189 | * by setting the event to 0, so it will be | ||
| 1190 | * at least 2^30 cycles until the next interrupt | ||
| 1191 | * (assuming each event counts at most 2 counts | ||
| 1192 | * per cycle). | ||
| 1193 | */ | ||
| 1194 | val = 0; | ||
| 1195 | left = ~0ULL >> 1; | ||
| 1196 | } | ||
| 1197 | } | 1239 | } |
| 1198 | |||
| 1199 | write_pmc(event->hw.idx, val); | ||
| 1200 | local64_set(&event->hw.prev_count, val); | ||
| 1201 | local64_set(&event->hw.period_left, left); | ||
| 1202 | perf_event_update_userpage(event); | ||
| 1203 | } | 1240 | } |
| 1204 | 1241 | ||
| 1205 | /* | 1242 | /* |
| @@ -1342,6 +1379,7 @@ int register_power_pmu(struct power_pmu *pmu) | |||
| 1342 | freeze_events_kernel = MMCR0_FCHV; | 1379 | freeze_events_kernel = MMCR0_FCHV; |
| 1343 | #endif /* CONFIG_PPC64 */ | 1380 | #endif /* CONFIG_PPC64 */ |
| 1344 | 1381 | ||
| 1382 | perf_pmu_register(&power_pmu); | ||
| 1345 | perf_cpu_notifier(power_pmu_notifier); | 1383 | perf_cpu_notifier(power_pmu_notifier); |
| 1346 | 1384 | ||
| 1347 | return 0; | 1385 | return 0; |
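
Note: taken together, the perf_event.c hunks convert the driver from the old global hw_perf_enable()/hw_perf_disable() hooks and the pmu.enable/.disable/.unthrottle callbacks to the newer struct pmu interface, where the core drives events through event_init, add/del and start/stop and the driver registers itself with perf_pmu_register(). A minimal skeleton of that interface, with placeholder bodies rather than the powerpc logic (callback names and flag semantics follow the diff; everything prefixed example_ is hypothetical):

    #include <linux/perf_event.h>

    static void example_pmu_enable(struct pmu *pmu)  { /* reprogram the hardware */ }
    static void example_pmu_disable(struct pmu *pmu) { /* freeze all counters   */ }

    static void example_start(struct perf_event *event, int flags)
    {
            if (flags & PERF_EF_RELOAD)
                    WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
            event->hw.state = 0;                    /* counting again */
    }

    static void example_stop(struct perf_event *event, int flags)
    {
            event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
    }

    static void example_read(struct perf_event *event)
    {
            /* fold the hardware count into event->count */
    }

    static int example_add(struct perf_event *event, int flags)
    {
            /* claim a counter; leave the event stopped unless asked to start */
            event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
            if (flags & PERF_EF_START)
                    example_start(event, PERF_EF_RELOAD);
            return 0;
    }

    static void example_del(struct perf_event *event, int flags)
    {
            example_stop(event, PERF_EF_UPDATE);
            /* release the counter */
    }

    static int example_event_init(struct perf_event *event)
    {
            /* -ENOENT tells the core "not my event type, try another PMU";
             * any other error aborts the syscall - this is why the diff
             * switches the unknown-type case from -EINVAL to -ENOENT. */
            if (event->attr.type != PERF_TYPE_RAW)
                    return -ENOENT;
            return 0;
    }

    static struct pmu example_pmu = {
            .pmu_enable     = example_pmu_enable,
            .pmu_disable    = example_pmu_disable,
            .event_init     = example_event_init,
            .add            = example_add,
            .del            = example_del,
            .start          = example_start,
            .stop           = example_stop,
            .read           = example_read,
    };

    /* registered once at driver init, as register_power_pmu() now does: */
    /* perf_pmu_register(&example_pmu); */
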
diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c
index 1ba45471ae43..7ecca59ddf77 100644
--- a/arch/powerpc/kernel/perf_event_fsl_emb.c
+++ b/arch/powerpc/kernel/perf_event_fsl_emb.c
| @@ -156,6 +156,9 @@ static void fsl_emb_pmu_read(struct perf_event *event) | |||
| 156 | { | 156 | { |
| 157 | s64 val, delta, prev; | 157 | s64 val, delta, prev; |
| 158 | 158 | ||
| 159 | if (event->hw.state & PERF_HES_STOPPED) | ||
| 160 | return; | ||
| 161 | |||
| 159 | /* | 162 | /* |
| 160 | * Performance monitor interrupts come even when interrupts | 163 | * Performance monitor interrupts come even when interrupts |
| 161 | * are soft-disabled, as long as interrupts are hard-enabled. | 164 | * are soft-disabled, as long as interrupts are hard-enabled. |
| @@ -177,7 +180,7 @@ static void fsl_emb_pmu_read(struct perf_event *event) | |||
| 177 | * Disable all events to prevent PMU interrupts and to allow | 180 | * Disable all events to prevent PMU interrupts and to allow |
| 178 | * events to be added or removed. | 181 | * events to be added or removed. |
| 179 | */ | 182 | */ |
| 180 | void hw_perf_disable(void) | 183 | static void fsl_emb_pmu_disable(struct pmu *pmu) |
| 181 | { | 184 | { |
| 182 | struct cpu_hw_events *cpuhw; | 185 | struct cpu_hw_events *cpuhw; |
| 183 | unsigned long flags; | 186 | unsigned long flags; |
| @@ -216,7 +219,7 @@ void hw_perf_disable(void) | |||
| 216 | * If we were previously disabled and events were added, then | 219 | * If we were previously disabled and events were added, then |
| 217 | * put the new config on the PMU. | 220 | * put the new config on the PMU. |
| 218 | */ | 221 | */ |
| 219 | void hw_perf_enable(void) | 222 | static void fsl_emb_pmu_enable(struct pmu *pmu) |
| 220 | { | 223 | { |
| 221 | struct cpu_hw_events *cpuhw; | 224 | struct cpu_hw_events *cpuhw; |
| 222 | unsigned long flags; | 225 | unsigned long flags; |
| @@ -262,8 +265,8 @@ static int collect_events(struct perf_event *group, int max_count, | |||
| 262 | return n; | 265 | return n; |
| 263 | } | 266 | } |
| 264 | 267 | ||
| 265 | /* perf must be disabled, context locked on entry */ | 268 | /* context locked on entry */ |
| 266 | static int fsl_emb_pmu_enable(struct perf_event *event) | 269 | static int fsl_emb_pmu_add(struct perf_event *event, int flags) |
| 267 | { | 270 | { |
| 268 | struct cpu_hw_events *cpuhw; | 271 | struct cpu_hw_events *cpuhw; |
| 269 | int ret = -EAGAIN; | 272 | int ret = -EAGAIN; |
| @@ -271,6 +274,7 @@ static int fsl_emb_pmu_enable(struct perf_event *event) | |||
| 271 | u64 val; | 274 | u64 val; |
| 272 | int i; | 275 | int i; |
| 273 | 276 | ||
| 277 | perf_pmu_disable(event->pmu); | ||
| 274 | cpuhw = &get_cpu_var(cpu_hw_events); | 278 | cpuhw = &get_cpu_var(cpu_hw_events); |
| 275 | 279 | ||
| 276 | if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) | 280 | if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) |
| @@ -301,6 +305,12 @@ static int fsl_emb_pmu_enable(struct perf_event *event) | |||
| 301 | val = 0x80000000L - left; | 305 | val = 0x80000000L - left; |
| 302 | } | 306 | } |
| 303 | local64_set(&event->hw.prev_count, val); | 307 | local64_set(&event->hw.prev_count, val); |
| 308 | |||
| 309 | if (!(flags & PERF_EF_START)) { | ||
| 310 | event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE; | ||
| 311 | val = 0; | ||
| 312 | } | ||
| 313 | |||
| 304 | write_pmc(i, val); | 314 | write_pmc(i, val); |
| 305 | perf_event_update_userpage(event); | 315 | perf_event_update_userpage(event); |
| 306 | 316 | ||
| @@ -310,15 +320,17 @@ static int fsl_emb_pmu_enable(struct perf_event *event) | |||
| 310 | ret = 0; | 320 | ret = 0; |
| 311 | out: | 321 | out: |
| 312 | put_cpu_var(cpu_hw_events); | 322 | put_cpu_var(cpu_hw_events); |
| 323 | perf_pmu_enable(event->pmu); | ||
| 313 | return ret; | 324 | return ret; |
| 314 | } | 325 | } |
| 315 | 326 | ||
| 316 | /* perf must be disabled, context locked on entry */ | 327 | /* context locked on entry */ |
| 317 | static void fsl_emb_pmu_disable(struct perf_event *event) | 328 | static void fsl_emb_pmu_del(struct perf_event *event, int flags) |
| 318 | { | 329 | { |
| 319 | struct cpu_hw_events *cpuhw; | 330 | struct cpu_hw_events *cpuhw; |
| 320 | int i = event->hw.idx; | 331 | int i = event->hw.idx; |
| 321 | 332 | ||
| 333 | perf_pmu_disable(event->pmu); | ||
| 322 | if (i < 0) | 334 | if (i < 0) |
| 323 | goto out; | 335 | goto out; |
| 324 | 336 | ||
| @@ -346,44 +358,57 @@ static void fsl_emb_pmu_disable(struct perf_event *event) | |||
| 346 | cpuhw->n_events--; | 358 | cpuhw->n_events--; |
| 347 | 359 | ||
| 348 | out: | 360 | out: |
| 361 | perf_pmu_enable(event->pmu); | ||
| 349 | put_cpu_var(cpu_hw_events); | 362 | put_cpu_var(cpu_hw_events); |
| 350 | } | 363 | } |
| 351 | 364 | ||
| 352 | /* | 365 | static void fsl_emb_pmu_start(struct perf_event *event, int ef_flags) |
| 353 | * Re-enable interrupts on a event after they were throttled | ||
| 354 | * because they were coming too fast. | ||
| 355 | * | ||
| 356 | * Context is locked on entry, but perf is not disabled. | ||
| 357 | */ | ||
| 358 | static void fsl_emb_pmu_unthrottle(struct perf_event *event) | ||
| 359 | { | 366 | { |
| 360 | s64 val, left; | ||
| 361 | unsigned long flags; | 367 | unsigned long flags; |
| 368 | s64 left; | ||
| 362 | 369 | ||
| 363 | if (event->hw.idx < 0 || !event->hw.sample_period) | 370 | if (event->hw.idx < 0 || !event->hw.sample_period) |
| 364 | return; | 371 | return; |
| 372 | |||
| 373 | if (!(event->hw.state & PERF_HES_STOPPED)) | ||
| 374 | return; | ||
| 375 | |||
| 376 | if (ef_flags & PERF_EF_RELOAD) | ||
| 377 | WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); | ||
| 378 | |||
| 365 | local_irq_save(flags); | 379 | local_irq_save(flags); |
| 366 | perf_disable(); | 380 | perf_pmu_disable(event->pmu); |
| 367 | fsl_emb_pmu_read(event); | 381 | |
| 368 | left = event->hw.sample_period; | 382 | event->hw.state = 0; |
| 369 | event->hw.last_period = left; | 383 | left = local64_read(&event->hw.period_left); |
| 370 | val = 0; | 384 | write_pmc(event->hw.idx, left); |
| 371 | if (left < 0x80000000L) | 385 | |
| 372 | val = 0x80000000L - left; | ||
| 373 | write_pmc(event->hw.idx, val); | ||
| 374 | local64_set(&event->hw.prev_count, val); | ||
| 375 | local64_set(&event->hw.period_left, left); | ||
| 376 | perf_event_update_userpage(event); | 386 | perf_event_update_userpage(event); |
| 377 | perf_enable(); | 387 | perf_pmu_enable(event->pmu); |
| 378 | local_irq_restore(flags); | 388 | local_irq_restore(flags); |
| 379 | } | 389 | } |
| 380 | 390 | ||
| 381 | static struct pmu fsl_emb_pmu = { | 391 | static void fsl_emb_pmu_stop(struct perf_event *event, int ef_flags) |
| 382 | .enable = fsl_emb_pmu_enable, | 392 | { |
| 383 | .disable = fsl_emb_pmu_disable, | 393 | unsigned long flags; |
| 384 | .read = fsl_emb_pmu_read, | 394 | |
| 385 | .unthrottle = fsl_emb_pmu_unthrottle, | 395 | if (event->hw.idx < 0 || !event->hw.sample_period) |
| 386 | }; | 396 | return; |
| 397 | |||
| 398 | if (event->hw.state & PERF_HES_STOPPED) | ||
| 399 | return; | ||
| 400 | |||
| 401 | local_irq_save(flags); | ||
| 402 | perf_pmu_disable(event->pmu); | ||
| 403 | |||
| 404 | fsl_emb_pmu_read(event); | ||
| 405 | event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; | ||
| 406 | write_pmc(event->hw.idx, 0); | ||
| 407 | |||
| 408 | perf_event_update_userpage(event); | ||
| 409 | perf_pmu_enable(event->pmu); | ||
| 410 | local_irq_restore(flags); | ||
| 411 | } | ||
| 387 | 412 | ||
| 388 | /* | 413 | /* |
| 389 | * Release the PMU if this is the last perf_event. | 414 | * Release the PMU if this is the last perf_event. |
| @@ -428,7 +453,7 @@ static int hw_perf_cache_event(u64 config, u64 *eventp) | |||
| 428 | return 0; | 453 | return 0; |
| 429 | } | 454 | } |
| 430 | 455 | ||
| 431 | const struct pmu *hw_perf_event_init(struct perf_event *event) | 456 | static int fsl_emb_pmu_event_init(struct perf_event *event) |
| 432 | { | 457 | { |
| 433 | u64 ev; | 458 | u64 ev; |
| 434 | struct perf_event *events[MAX_HWEVENTS]; | 459 | struct perf_event *events[MAX_HWEVENTS]; |
| @@ -441,14 +466,14 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
| 441 | case PERF_TYPE_HARDWARE: | 466 | case PERF_TYPE_HARDWARE: |
| 442 | ev = event->attr.config; | 467 | ev = event->attr.config; |
| 443 | if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) | 468 | if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) |
| 444 | return ERR_PTR(-EOPNOTSUPP); | 469 | return -EOPNOTSUPP; |
| 445 | ev = ppmu->generic_events[ev]; | 470 | ev = ppmu->generic_events[ev]; |
| 446 | break; | 471 | break; |
| 447 | 472 | ||
| 448 | case PERF_TYPE_HW_CACHE: | 473 | case PERF_TYPE_HW_CACHE: |
| 449 | err = hw_perf_cache_event(event->attr.config, &ev); | 474 | err = hw_perf_cache_event(event->attr.config, &ev); |
| 450 | if (err) | 475 | if (err) |
| 451 | return ERR_PTR(err); | 476 | return err; |
| 452 | break; | 477 | break; |
| 453 | 478 | ||
| 454 | case PERF_TYPE_RAW: | 479 | case PERF_TYPE_RAW: |
| @@ -456,12 +481,12 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
| 456 | break; | 481 | break; |
| 457 | 482 | ||
| 458 | default: | 483 | default: |
| 459 | return ERR_PTR(-EINVAL); | 484 | return -ENOENT; |
| 460 | } | 485 | } |
| 461 | 486 | ||
| 462 | event->hw.config = ppmu->xlate_event(ev); | 487 | event->hw.config = ppmu->xlate_event(ev); |
| 463 | if (!(event->hw.config & FSL_EMB_EVENT_VALID)) | 488 | if (!(event->hw.config & FSL_EMB_EVENT_VALID)) |
| 464 | return ERR_PTR(-EINVAL); | 489 | return -EINVAL; |
| 465 | 490 | ||
| 466 | /* | 491 | /* |
| 467 | * If this is in a group, check if it can go on with all the | 492 | * If this is in a group, check if it can go on with all the |
| @@ -473,7 +498,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
| 473 | n = collect_events(event->group_leader, | 498 | n = collect_events(event->group_leader, |
| 474 | ppmu->n_counter - 1, events); | 499 | ppmu->n_counter - 1, events); |
| 475 | if (n < 0) | 500 | if (n < 0) |
| 476 | return ERR_PTR(-EINVAL); | 501 | return -EINVAL; |
| 477 | } | 502 | } |
| 478 | 503 | ||
| 479 | if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) { | 504 | if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) { |
| @@ -484,7 +509,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
| 484 | } | 509 | } |
| 485 | 510 | ||
| 486 | if (num_restricted >= ppmu->n_restricted) | 511 | if (num_restricted >= ppmu->n_restricted) |
| 487 | return ERR_PTR(-EINVAL); | 512 | return -EINVAL; |
| 488 | } | 513 | } |
| 489 | 514 | ||
| 490 | event->hw.idx = -1; | 515 | event->hw.idx = -1; |
| @@ -497,7 +522,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
| 497 | if (event->attr.exclude_kernel) | 522 | if (event->attr.exclude_kernel) |
| 498 | event->hw.config_base |= PMLCA_FCS; | 523 | event->hw.config_base |= PMLCA_FCS; |
| 499 | if (event->attr.exclude_idle) | 524 | if (event->attr.exclude_idle) |
| 500 | return ERR_PTR(-ENOTSUPP); | 525 | return -ENOTSUPP; |
| 501 | 526 | ||
| 502 | event->hw.last_period = event->hw.sample_period; | 527 | event->hw.last_period = event->hw.sample_period; |
| 503 | local64_set(&event->hw.period_left, event->hw.last_period); | 528 | local64_set(&event->hw.period_left, event->hw.last_period); |
| @@ -523,11 +548,20 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
| 523 | } | 548 | } |
| 524 | event->destroy = hw_perf_event_destroy; | 549 | event->destroy = hw_perf_event_destroy; |
| 525 | 550 | ||
| 526 | if (err) | 551 | return err; |
| 527 | return ERR_PTR(err); | ||
| 528 | return &fsl_emb_pmu; | ||
| 529 | } | 552 | } |
| 530 | 553 | ||
| 554 | static struct pmu fsl_emb_pmu = { | ||
| 555 | .pmu_enable = fsl_emb_pmu_enable, | ||
| 556 | .pmu_disable = fsl_emb_pmu_disable, | ||
| 557 | .event_init = fsl_emb_pmu_event_init, | ||
| 558 | .add = fsl_emb_pmu_add, | ||
| 559 | .del = fsl_emb_pmu_del, | ||
| 560 | .start = fsl_emb_pmu_start, | ||
| 561 | .stop = fsl_emb_pmu_stop, | ||
| 562 | .read = fsl_emb_pmu_read, | ||
| 563 | }; | ||
| 564 | |||
| 531 | /* | 565 | /* |
| 532 | * A counter has overflowed; update its count and record | 566 | * A counter has overflowed; update its count and record |
| 533 | * things if requested. Note that interrupts are hard-disabled | 567 | * things if requested. Note that interrupts are hard-disabled |
| @@ -540,6 +574,11 @@ static void record_and_restart(struct perf_event *event, unsigned long val, | |||
| 540 | s64 prev, delta, left; | 574 | s64 prev, delta, left; |
| 541 | int record = 0; | 575 | int record = 0; |
| 542 | 576 | ||
| 577 | if (event->hw.state & PERF_HES_STOPPED) { | ||
| 578 | write_pmc(event->hw.idx, 0); | ||
| 579 | return; | ||
| 580 | } | ||
| 581 | |||
| 543 | /* we don't have to worry about interrupts here */ | 582 | /* we don't have to worry about interrupts here */ |
| 544 | prev = local64_read(&event->hw.prev_count); | 583 | prev = local64_read(&event->hw.prev_count); |
| 545 | delta = (val - prev) & 0xfffffffful; | 584 | delta = (val - prev) & 0xfffffffful; |
| @@ -562,6 +601,11 @@ static void record_and_restart(struct perf_event *event, unsigned long val, | |||
| 562 | val = 0x80000000LL - left; | 601 | val = 0x80000000LL - left; |
| 563 | } | 602 | } |
| 564 | 603 | ||
| 604 | write_pmc(event->hw.idx, val); | ||
| 605 | local64_set(&event->hw.prev_count, val); | ||
| 606 | local64_set(&event->hw.period_left, left); | ||
| 607 | perf_event_update_userpage(event); | ||
| 608 | |||
| 565 | /* | 609 | /* |
| 566 | * Finally record data if requested. | 610 | * Finally record data if requested. |
| 567 | */ | 611 | */ |
| @@ -571,23 +615,9 @@ static void record_and_restart(struct perf_event *event, unsigned long val, | |||
| 571 | perf_sample_data_init(&data, 0); | 615 | perf_sample_data_init(&data, 0); |
| 572 | data.period = event->hw.last_period; | 616 | data.period = event->hw.last_period; |
| 573 | 617 | ||
| 574 | if (perf_event_overflow(event, nmi, &data, regs)) { | 618 | if (perf_event_overflow(event, nmi, &data, regs)) |
| 575 | /* | 619 | fsl_emb_pmu_stop(event, 0); |
| 576 | * Interrupts are coming too fast - throttle them | ||
| 577 | * by setting the event to 0, so it will be | ||
| 578 | * at least 2^30 cycles until the next interrupt | ||
| 579 | * (assuming each event counts at most 2 counts | ||
| 580 | * per cycle). | ||
| 581 | */ | ||
| 582 | val = 0; | ||
| 583 | left = ~0ULL >> 1; | ||
| 584 | } | ||
| 585 | } | 620 | } |
| 586 | |||
| 587 | write_pmc(event->hw.idx, val); | ||
| 588 | local64_set(&event->hw.prev_count, val); | ||
| 589 | local64_set(&event->hw.period_left, left); | ||
| 590 | perf_event_update_userpage(event); | ||
| 591 | } | 621 | } |
| 592 | 622 | ||
| 593 | static void perf_event_interrupt(struct pt_regs *regs) | 623 | static void perf_event_interrupt(struct pt_regs *regs) |
| @@ -651,5 +681,7 @@ int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu) | |||
| 651 | pr_info("%s performance monitor hardware support registered\n", | 681 | pr_info("%s performance monitor hardware support registered\n", |
| 652 | pmu->name); | 682 | pmu->name); |
| 653 | 683 | ||
| 684 | perf_pmu_register(&fsl_emb_pmu); | ||
| 685 | |||
| 654 | return 0; | 686 | return 0; |
| 655 | } | 687 | } |
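
Note: both converted drivers now track counter state in event->hw.state, which is why PERF_HES_STOPPED/PERF_HES_UPTODATE checks recur in the hunks above and why the unthrottle() callbacks disappear: when perf_event_overflow() reports throttling, the core only expects the event to be stopped (power_pmu_stop()/fsl_emb_pmu_stop()) and later restarted through ->start(). A hedged summary of the two flags as these drivers use them, with an illustrative read path:

    /*
     * PERF_HES_STOPPED  - the counter is parked; set by ->stop() and by
     *                     ->add() without PERF_EF_START, cleared by ->start().
     * PERF_HES_UPTODATE - event->count already reflects the hardware counter,
     *                     so reads can short-circuit.
     */
    static void sketch_read(struct perf_event *event)   /* illustrative only */
    {
            if (event->hw.state & PERF_HES_STOPPED)
                    return; /* ->stop() already folded the hardware value in */

            /* otherwise read the PMC and accumulate the delta, as
             * power_pmu_read() and fsl_emb_pmu_read() do in the hunks above */
    }
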
diff --git a/arch/powerpc/platforms/512x/clock.c b/arch/powerpc/platforms/512x/clock.c
index 5b243bd3eb3b..3dc2a8d262b8 100644
--- a/arch/powerpc/platforms/512x/clock.c
+++ b/arch/powerpc/platforms/512x/clock.c
| @@ -57,7 +57,7 @@ static struct clk *mpc5121_clk_get(struct device *dev, const char *id) | |||
| 57 | int id_match = 0; | 57 | int id_match = 0; |
| 58 | 58 | ||
| 59 | if (dev == NULL || id == NULL) | 59 | if (dev == NULL || id == NULL) |
| 60 | return NULL; | 60 | return clk; |
| 61 | 61 | ||
| 62 | mutex_lock(&clocks_mutex); | 62 | mutex_lock(&clocks_mutex); |
| 63 | list_for_each_entry(p, &clocks, node) { | 63 | list_for_each_entry(p, &clocks, node) { |
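
Note: the one-line clock.c change keeps mpc5121_clk_get() from returning NULL on a bad dev/id pair. clk_get() callers conventionally test the result with IS_ERR(), so a NULL return would pass that test and get dereferenced later; returning the local clk variable (presumably initialised to an error pointer earlier in the function, outside this hunk) keeps the failure visible. A caller-side sketch, with a hypothetical device and clock name:

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev) /* hypothetical consumer */
    {
            struct clk *clk = clk_get(&pdev->dev, "psc_mclk");      /* hypothetical id */

            if (IS_ERR(clk))        /* a NULL return would sail straight past this check */
                    return PTR_ERR(clk);

            clk_enable(clk);
            return 0;
    }
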
diff --git a/arch/powerpc/platforms/52xx/efika.c b/arch/powerpc/platforms/52xx/efika.c
index 45c0cb9b67e6..18c104820198 100644
--- a/arch/powerpc/platforms/52xx/efika.c
+++ b/arch/powerpc/platforms/52xx/efika.c
| @@ -99,7 +99,7 @@ static void __init efika_pcisetup(void) | |||
| 99 | if (bus_range == NULL || len < 2 * sizeof(int)) { | 99 | if (bus_range == NULL || len < 2 * sizeof(int)) { |
| 100 | printk(KERN_WARNING EFIKA_PLATFORM_NAME | 100 | printk(KERN_WARNING EFIKA_PLATFORM_NAME |
| 101 | ": Can't get bus-range for %s\n", pcictrl->full_name); | 101 | ": Can't get bus-range for %s\n", pcictrl->full_name); |
| 102 | return; | 102 | goto out_put; |
| 103 | } | 103 | } |
| 104 | 104 | ||
| 105 | if (bus_range[1] == bus_range[0]) | 105 | if (bus_range[1] == bus_range[0]) |
| @@ -111,12 +111,12 @@ static void __init efika_pcisetup(void) | |||
| 111 | printk(" controlled by %s\n", pcictrl->full_name); | 111 | printk(" controlled by %s\n", pcictrl->full_name); |
| 112 | printk("\n"); | 112 | printk("\n"); |
| 113 | 113 | ||
| 114 | hose = pcibios_alloc_controller(of_node_get(pcictrl)); | 114 | hose = pcibios_alloc_controller(pcictrl); |
| 115 | if (!hose) { | 115 | if (!hose) { |
| 116 | printk(KERN_WARNING EFIKA_PLATFORM_NAME | 116 | printk(KERN_WARNING EFIKA_PLATFORM_NAME |
| 117 | ": Can't allocate PCI controller structure for %s\n", | 117 | ": Can't allocate PCI controller structure for %s\n", |
| 118 | pcictrl->full_name); | 118 | pcictrl->full_name); |
| 119 | return; | 119 | goto out_put; |
| 120 | } | 120 | } |
| 121 | 121 | ||
| 122 | hose->first_busno = bus_range[0]; | 122 | hose->first_busno = bus_range[0]; |
| @@ -124,6 +124,9 @@ static void __init efika_pcisetup(void) | |||
| 124 | hose->ops = &rtas_pci_ops; | 124 | hose->ops = &rtas_pci_ops; |
| 125 | 125 | ||
| 126 | pci_process_bridge_OF_ranges(hose, pcictrl, 0); | 126 | pci_process_bridge_OF_ranges(hose, pcictrl, 0); |
| 127 | return; | ||
| 128 | out_put: | ||
| 129 | of_node_put(pcictrl); | ||
| 127 | } | 130 | } |
| 128 | 131 | ||
| 129 | #else | 132 | #else |
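
Note: the efika.c hunks are a device-node reference-counting fix: every early return taken after the PCI controller node has been looked up now funnels through out_put/of_node_put(), and the extra of_node_get() around pcibios_alloc_controller() goes away because, on success, the reference obtained by the lookup is handed over to the controller structure. The general shape of the pattern, as an illustrative sketch (the node type and property check are hypothetical):

    #include <linux/of.h>

    static void example_pci_setup(void)         /* illustrative only */
    {
            struct device_node *np;

            np = of_find_node_by_type(NULL, "pci");     /* lookup takes a reference */
            if (!np)
                    return;

            if (!of_get_property(np, "bus-range", NULL))
                    goto out_put;               /* every early exit must drop the reference */

            /* Success: the reference is handed over to whoever stores np (here
             * the PCI controller structure), so no extra of_node_get() and no
             * of_node_put() on this path. */
            return;

    out_put:
            of_node_put(np);
    }
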
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_common.c b/arch/powerpc/platforms/52xx/mpc52xx_common.c
index 6e905314ad5d..41f3a7eda1de 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_common.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_common.c
| @@ -325,12 +325,16 @@ int mpc5200_psc_ac97_gpio_reset(int psc_number) | |||
| 325 | clrbits32(&simple_gpio->simple_dvo, sync | out); | 325 | clrbits32(&simple_gpio->simple_dvo, sync | out); |
| 326 | clrbits8(&wkup_gpio->wkup_dvo, reset); | 326 | clrbits8(&wkup_gpio->wkup_dvo, reset); |
| 327 | 327 | ||
| 328 | /* wait at lease 1 us */ | 328 | /* wait for 1 us */ |
| 329 | udelay(2); | 329 | udelay(1); |
| 330 | 330 | ||
| 331 | /* Deassert reset */ | 331 | /* Deassert reset */ |
| 332 | setbits8(&wkup_gpio->wkup_dvo, reset); | 332 | setbits8(&wkup_gpio->wkup_dvo, reset); |
| 333 | 333 | ||
| 334 | /* wait at least 200ns */ | ||
| 335 | /* 7 ~= (200ns * timebase) / ns2sec */ | ||
| 336 | __delay(7); | ||
| 337 | |||
| 334 | /* Restore pin-muxing */ | 338 | /* Restore pin-muxing */ |
| 335 | out_be32(&simple_gpio->port_config, mux); | 339 | out_be32(&simple_gpio->port_config, mux); |
| 336 | 340 | ||
