| author | Sudeep KarkadaNagesha <Sudeep.KarkadaNagesha@arm.com> | 2012-07-30 07:00:02 -0400 |
|---|---|---|
| committer | Will Deacon <will.deacon@arm.com> | 2012-11-09 06:37:25 -0500 |
| commit | ed6f2a522398c26559f4da23a80aa6195e6284c7 (patch) | |
| tree | f07a2bb16e7d5b121820256b51cf22c3be9bc352 /arch/arm/kernel/perf_event_xscale.c | |
| parent | 513c99ce4e64245be1f83f56039ec4891b451955 (diff) | |
ARM: perf: consistently use struct perf_event in arm_pmu functions
The arm_pmu functions have wildly varied parameters which can often be
derived from struct perf_event.
This patch changes the arm_pmu function prototypes so that struct
perf_event pointers are passed in preference to fields that can be
derived from the event.
Signed-off-by: Sudeep KarkadaNagesha <Sudeep.KarkadaNagesha@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
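The change is easiest to see in isolation: once a callback receives the struct perf_event, the PMU, the hardware event state and the counter index can all be recovered from it, so none of them needs to be a separate parameter. The stand-alone C sketch below illustrates that derivation; the struct layouts are simplified stand-ins rather than the kernel's real definitions, and example_enable_event() is a hypothetical callback, but the to_arm_pmu() container-of cast mirrors the helper the patched code uses.

```c
/* Stand-alone sketch; struct layouts are simplified stand-ins, not the
 * kernel's definitions. Shows why one perf_event pointer is enough. */
#include <stddef.h>
#include <stdio.h>

struct pmu { const char *name; };
struct hw_perf_event { int idx; unsigned long config_base; };
struct perf_event { struct pmu *pmu; struct hw_perf_event hw; };
struct arm_pmu { struct pmu pmu; int num_events; };

/* container_of-style cast from the embedded struct pmu back to its
 * enclosing struct arm_pmu, as the kernel's to_arm_pmu() does. */
#define to_arm_pmu(p) \
	((struct arm_pmu *)((char *)(p) - offsetof(struct arm_pmu, pmu)))

/* New-style prototype: everything is derived from the event itself,
 * replacing the old (struct hw_perf_event *hwc, int idx) parameters. */
static void example_enable_event(struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	printf("enable counter %d on %s (%d counters total)\n",
	       idx, cpu_pmu->pmu.name, cpu_pmu->num_events);
}

int main(void)
{
	struct arm_pmu xscale = { .pmu = { .name = "xscale1" }, .num_events = 3 };
	struct perf_event ev = { .pmu = &xscale.pmu, .hw = { .idx = 1 } };

	example_enable_event(&ev);
	return 0;
}
```

This is exactly the pattern the hunks below apply to xscale1pmu_enable_event() and friends: the old hwc/idx parameters become locals derived from the single event argument.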
Diffstat (limited to 'arch/arm/kernel/perf_event_xscale.c')
-rw-r--r-- | arch/arm/kernel/perf_event_xscale.c | 85 |
1 file changed, 48 insertions(+), 37 deletions(-)
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 131ede6c2fdf..0c8265e53d5f 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -224,7 +224,8 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 {
 	unsigned long pmnc;
 	struct perf_sample_data data;
-	struct pmu_hw_events *cpuc;
+	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
+	struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
 	struct pt_regs *regs;
 	int idx;
 
@@ -248,7 +249,6 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 
 	regs = get_irq_regs();
 
-	cpuc = &__get_cpu_var(cpu_hw_events);
 	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
@@ -260,13 +260,13 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx);
+		armpmu_event_update(event);
 		perf_sample_data_init(&data, 0, hwc->last_period);
-		if (!armpmu_event_set_period(event, hwc, idx))
+		if (!armpmu_event_set_period(event))
 			continue;
 
 		if (perf_event_overflow(event, &data, regs))
-			cpu_pmu->disable(hwc, idx);
+			cpu_pmu->disable(event);
 	}
 
 	irq_work_run();
@@ -280,11 +280,13 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 	return IRQ_HANDLED;
 }
 
-static void
-xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
+static void xscale1pmu_enable_event(struct perf_event *event)
 {
 	unsigned long val, mask, evt, flags;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+	int idx = hwc->idx;
 
 	switch (idx) {
 	case XSCALE_CYCLE_COUNTER:
@@ -314,11 +316,13 @@ xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static void
-xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
+static void xscale1pmu_disable_event(struct perf_event *event)
 {
 	unsigned long val, mask, evt, flags;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+	int idx = hwc->idx;
 
 	switch (idx) {
 	case XSCALE_CYCLE_COUNTER:
@@ -348,9 +352,10 @@ xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
 
 static int
 xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
-			 struct hw_perf_event *event)
+				struct perf_event *event)
 {
-	if (XSCALE_PERFCTR_CCNT == event->config_base) {
+	struct hw_perf_event *hwc = &event->hw;
+	if (XSCALE_PERFCTR_CCNT == hwc->config_base) {
 		if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask))
 			return -EAGAIN;
 
@@ -366,8 +371,7 @@ xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
 	}
 }
 
-static void
-xscale1pmu_start(void)
+static void xscale1pmu_start(struct arm_pmu *cpu_pmu)
 {
 	unsigned long flags, val;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
@@ -379,8 +383,7 @@ xscale1pmu_start(void)
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static void
-xscale1pmu_stop(void)
+static void xscale1pmu_stop(struct arm_pmu *cpu_pmu)
 {
 	unsigned long flags, val;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
@@ -392,9 +395,10 @@ xscale1pmu_stop(void)
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static inline u32
-xscale1pmu_read_counter(int counter)
+static inline u32 xscale1pmu_read_counter(struct perf_event *event)
 {
+	struct hw_perf_event *hwc = &event->hw;
+	int counter = hwc->idx;
 	u32 val = 0;
 
 	switch (counter) {
@@ -412,9 +416,11 @@ xscale1pmu_read_counter(int counter)
 	return val;
 }
 
-static inline void
-xscale1pmu_write_counter(int counter, u32 val)
+static inline void xscale1pmu_write_counter(struct perf_event *event, u32 val)
 {
+	struct hw_perf_event *hwc = &event->hw;
+	int counter = hwc->idx;
+
 	switch (counter) {
 	case XSCALE_CYCLE_COUNTER:
 		asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
@@ -565,7 +571,8 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 {
 	unsigned long pmnc, of_flags;
 	struct perf_sample_data data;
-	struct pmu_hw_events *cpuc;
+	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
+	struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
 	struct pt_regs *regs;
 	int idx;
 
@@ -583,7 +590,6 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 
 	regs = get_irq_regs();
 
-	cpuc = &__get_cpu_var(cpu_hw_events);
 	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
@@ -595,13 +601,13 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx);
+		armpmu_event_update(event);
 		perf_sample_data_init(&data, 0, hwc->last_period);
-		if (!armpmu_event_set_period(event, hwc, idx))
+		if (!armpmu_event_set_period(event))
 			continue;
 
 		if (perf_event_overflow(event, &data, regs))
-			cpu_pmu->disable(hwc, idx);
+			cpu_pmu->disable(event);
 	}
 
 	irq_work_run();
@@ -615,11 +621,13 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 	return IRQ_HANDLED;
 }
 
-static void
-xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
+static void xscale2pmu_enable_event(struct perf_event *event)
 {
 	unsigned long flags, ien, evtsel;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+	int idx = hwc->idx;
 
 	ien = xscale2pmu_read_int_enable();
 	evtsel = xscale2pmu_read_event_select();
@@ -659,11 +667,13 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static void
-xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
+static void xscale2pmu_disable_event(struct perf_event *event)
 {
 	unsigned long flags, ien, evtsel, of_flags;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+	int idx = hwc->idx;
 
 	ien = xscale2pmu_read_int_enable();
 	evtsel = xscale2pmu_read_event_select();
@@ -711,7 +721,7 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 
 static int
 xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc,
-			 struct hw_perf_event *event)
+				struct perf_event *event)
 {
 	int idx = xscale1pmu_get_event_idx(cpuc, event);
 	if (idx >= 0)
@@ -725,8 +735,7 @@ out:
 	return idx;
 }
 
-static void
-xscale2pmu_start(void)
+static void xscale2pmu_start(struct arm_pmu *cpu_pmu)
 {
 	unsigned long flags, val;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
@@ -738,8 +747,7 @@ xscale2pmu_start(void)
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static void
-xscale2pmu_stop(void)
+static void xscale2pmu_stop(struct arm_pmu *cpu_pmu)
 {
 	unsigned long flags, val;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
@@ -751,9 +759,10 @@ xscale2pmu_stop(void)
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static inline u32
-xscale2pmu_read_counter(int counter)
+static inline u32 xscale2pmu_read_counter(struct perf_event *event)
 {
+	struct hw_perf_event *hwc = &event->hw;
+	int counter = hwc->idx;
 	u32 val = 0;
 
 	switch (counter) {
@@ -777,9 +786,11 @@ xscale2pmu_read_counter(int counter)
 	return val;
 }
 
-static inline void
-xscale2pmu_write_counter(int counter, u32 val)
+static inline void xscale2pmu_write_counter(struct perf_event *event, u32 val)
 {
+	struct hw_perf_event *hwc = &event->hw;
+	int counter = hwc->idx;
+
 	switch (counter) {
 	case XSCALE_CYCLE_COUNTER:
 		asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));