Diffstat (limited to 'arch/arm/kernel/perf_event_xscale.c')
 -rw-r--r--   arch/arm/kernel/perf_event_xscale.c | 90
 1 file changed, 47 insertions(+), 43 deletions(-)
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 3c4397491d08..e0cca10a8411 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -40,7 +40,7 @@ enum xscale_perf_types {
 };
 
 enum xscale_counters {
-	XSCALE_CYCLE_COUNTER	= 1,
+	XSCALE_CYCLE_COUNTER	= 0,
 	XSCALE_COUNTER0,
 	XSCALE_COUNTER1,
 	XSCALE_COUNTER2,
@@ -222,7 +222,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 {
 	unsigned long pmnc;
 	struct perf_sample_data data;
-	struct cpu_hw_events *cpuc;
+	struct pmu_hw_events *cpuc;
 	struct pt_regs *regs;
 	int idx;
 
@@ -249,13 +249,10 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 	perf_sample_data_init(&data, 0);
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
-	for (idx = 0; idx <= armpmu->num_events; ++idx) {
+	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
-		if (!test_bit(idx, cpuc->active_mask))
-			continue;
-
 		if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
 			continue;
 
@@ -266,7 +263,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 			continue;
 
 		if (perf_event_overflow(event, &data, regs))
-			armpmu->disable(hwc, idx);
+			cpu_pmu->disable(hwc, idx);
 	}
 
 	irq_work_run();
@@ -284,6 +281,7 @@ static void
 xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long val, mask, evt, flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	switch (idx) {
 	case XSCALE_CYCLE_COUNTER:
@@ -305,18 +303,19 @@ xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val &= ~mask;
 	val |= evt;
 	xscale1pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void
 xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long val, mask, evt, flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	switch (idx) {
 	case XSCALE_CYCLE_COUNTER:
@@ -336,16 +335,16 @@ xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val &= ~mask;
 	val |= evt;
 	xscale1pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static int
-xscale1pmu_get_event_idx(struct cpu_hw_events *cpuc,
+xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
 			struct hw_perf_event *event)
 {
 	if (XSCALE_PERFCTR_CCNT == event->config_base) {
@@ -368,24 +367,26 @@ static void
 xscale1pmu_start(void)
 {
 	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val |= XSCALE_PMU_ENABLE;
 	xscale1pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void
 xscale1pmu_stop(void)
 {
 	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val &= ~XSCALE_PMU_ENABLE;
 	xscale1pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static inline u32
@@ -424,7 +425,13 @@ xscale1pmu_write_counter(int counter, u32 val)
 	}
 }
 
-static const struct arm_pmu xscale1pmu = {
+static int xscale_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &xscale_perf_map,
+				&xscale_perf_cache_map, 0xFF);
+}
+
+static struct arm_pmu xscale1pmu = {
 	.id		= ARM_PERF_PMU_ID_XSCALE1,
 	.name		= "xscale1",
 	.handle_irq	= xscale1pmu_handle_irq,
@@ -435,14 +442,12 @@ static const struct arm_pmu xscale1pmu = {
 	.get_event_idx	= xscale1pmu_get_event_idx,
 	.start		= xscale1pmu_start,
 	.stop		= xscale1pmu_stop,
-	.cache_map	= &xscale_perf_cache_map,
-	.event_map	= &xscale_perf_map,
-	.raw_event_mask	= 0xFF,
+	.map_event	= xscale_map_event,
 	.num_events	= 3,
 	.max_period	= (1LLU << 32) - 1,
 };
 
-static const struct arm_pmu *__init xscale1pmu_init(void)
+static struct arm_pmu *__init xscale1pmu_init(void)
 {
 	return &xscale1pmu;
 }
@@ -560,7 +565,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 {
 	unsigned long pmnc, of_flags;
 	struct perf_sample_data data;
-	struct cpu_hw_events *cpuc;
+	struct pmu_hw_events *cpuc;
 	struct pt_regs *regs;
 	int idx;
 
@@ -581,13 +586,10 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 	perf_sample_data_init(&data, 0);
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
-	for (idx = 0; idx <= armpmu->num_events; ++idx) {
+	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
-		if (!test_bit(idx, cpuc->active_mask))
-			continue;
-
 		if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx))
 			continue;
 
@@ -598,7 +600,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 			continue;
 
 		if (perf_event_overflow(event, &data, regs))
-			armpmu->disable(hwc, idx);
+			cpu_pmu->disable(hwc, idx);
 	}
 
 	irq_work_run();
@@ -616,6 +618,7 @@ static void
 xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long flags, ien, evtsel;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	ien = xscale2pmu_read_int_enable();
 	evtsel = xscale2pmu_read_event_select();
@@ -649,16 +652,17 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	xscale2pmu_write_event_select(evtsel);
 	xscale2pmu_write_int_enable(ien);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void
 xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long flags, ien, evtsel;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	ien = xscale2pmu_read_int_enable();
 	evtsel = xscale2pmu_read_event_select();
@@ -692,14 +696,14 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	xscale2pmu_write_event_select(evtsel);
 	xscale2pmu_write_int_enable(ien);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static int
-xscale2pmu_get_event_idx(struct cpu_hw_events *cpuc,
+xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc,
 			struct hw_perf_event *event)
 {
 	int idx = xscale1pmu_get_event_idx(cpuc, event);
@@ -718,24 +722,26 @@ static void
 xscale2pmu_start(void)
 {
 	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
 	val |= XSCALE_PMU_ENABLE;
 	xscale2pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void
 xscale2pmu_stop(void)
 {
 	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale2pmu_read_pmnc();
 	val &= ~XSCALE_PMU_ENABLE;
 	xscale2pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static inline u32
@@ -786,7 +792,7 @@ xscale2pmu_write_counter(int counter, u32 val)
 	}
 }
 
-static const struct arm_pmu xscale2pmu = {
+static struct arm_pmu xscale2pmu = {
 	.id		= ARM_PERF_PMU_ID_XSCALE2,
 	.name		= "xscale2",
 	.handle_irq	= xscale2pmu_handle_irq,
@@ -797,24 +803,22 @@ static const struct arm_pmu xscale2pmu = {
 	.get_event_idx	= xscale2pmu_get_event_idx,
 	.start		= xscale2pmu_start,
 	.stop		= xscale2pmu_stop,
-	.cache_map	= &xscale_perf_cache_map,
-	.event_map	= &xscale_perf_map,
-	.raw_event_mask	= 0xFF,
+	.map_event	= xscale_map_event,
 	.num_events	= 5,
 	.max_period	= (1LLU << 32) - 1,
 };
 
-static const struct arm_pmu *__init xscale2pmu_init(void)
+static struct arm_pmu *__init xscale2pmu_init(void)
 {
 	return &xscale2pmu;
 }
 #else
-static const struct arm_pmu *__init xscale1pmu_init(void)
+static struct arm_pmu *__init xscale1pmu_init(void)
 {
 	return NULL;
 }
 
-static const struct arm_pmu *__init xscale2pmu_init(void)
+static struct arm_pmu *__init xscale2pmu_init(void)
 {
 	return NULL;
 }
