author:    Mark Rutland <mark.rutland@arm.com>  2011-05-17 06:20:11 -0400
committer: Will Deacon <will.deacon@arm.com>    2011-08-31 05:50:12 -0400
commit:    8be3f9a2385f91f7bf5c58f351e24b9247898e8f
tree:      5e293c7702f36b7eafd611bd5e8e710719643ac4 /arch/arm/kernel/perf_event_xscale.c
parent:    3fc2c83087717dc88003428245d97b9d432fff2d
ARM: perf: remove cpu-related misnomers
Currently struct cpu_hw_events stores data on events running on a
PMU associated with a CPU. As this data is general enough to be used
for system PMUs, the name is a misnomer and may cause confusion when
the structure is used for a system PMU.
Additionally, 'armpmu' is commonly used as a parameter name for an
instance of struct arm_pmu. The name is also used for a global instance
which represents the CPU's PMU.
As cpu_hw_events is now not tied to CPU PMUs, it is renamed to
pmu_hw_events, with instances of it renamed similarly. As the global
'armpmu' is CPU-specific, it is renamed to cpu_pmu. This should make it
clearer which code is generic, and which is coupled with the CPU.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Reviewed-by: Jamie Iles <jamie@jamieiles.com>
Reviewed-by: Ashwin Chaugule <ashwinc@codeaurora.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
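The shape of the rename, as it appears at the call sites in this diff (a minimal
sketch, not the kernel's actual definitions: the real struct pmu_hw_events and
struct arm_pmu carry more fields than shown here; only the members this file
touches are listed):

        /* Formerly 'struct cpu_hw_events'; the data is per-PMU, not per-CPU. */
        struct pmu_hw_events {
                struct perf_event       **events;  /* active event per counter index */
                raw_spinlock_t          pmu_lock;  /* serialises PMNC read/modify/write */
        };

        /* Formerly the global 'armpmu'; the new name marks it as the CPU's PMU. */
        static struct arm_pmu *cpu_pmu;

Callers then read cpu_pmu->num_events, call cpu_pmu->disable(hwc, idx) on
overflow, and fetch the per-PMU state via cpu_pmu->get_hw_events(), exactly as
the hunks below show.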
Diffstat (limited to 'arch/arm/kernel/perf_event_xscale.c')
-rw-r--r--  arch/arm/kernel/perf_event_xscale.c | 32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index d4c7610d25b9..e0cca10a8411 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -222,7 +222,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 {
         unsigned long pmnc;
         struct perf_sample_data data;
-        struct cpu_hw_events *cpuc;
+        struct pmu_hw_events *cpuc;
         struct pt_regs *regs;
         int idx;
 
@@ -249,7 +249,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
         perf_sample_data_init(&data, 0);
 
         cpuc = &__get_cpu_var(cpu_hw_events);
-        for (idx = 0; idx < armpmu->num_events; ++idx) {
+        for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
                 struct perf_event *event = cpuc->events[idx];
                 struct hw_perf_event *hwc;
 
@@ -263,7 +263,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
                         continue;
 
                 if (perf_event_overflow(event, &data, regs))
-                        armpmu->disable(hwc, idx);
+                        cpu_pmu->disable(hwc, idx);
         }
 
         irq_work_run();
@@ -281,7 +281,7 @@ static void
 xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
         unsigned long val, mask, evt, flags;
-        struct cpu_hw_events *events = armpmu->get_hw_events();
+        struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
         switch (idx) {
         case XSCALE_CYCLE_COUNTER:
@@ -315,7 +315,7 @@ static void
 xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
         unsigned long val, mask, evt, flags;
-        struct cpu_hw_events *events = armpmu->get_hw_events();
+        struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
         switch (idx) {
         case XSCALE_CYCLE_COUNTER:
@@ -344,7 +344,7 @@ xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
 }
 
 static int
-xscale1pmu_get_event_idx(struct cpu_hw_events *cpuc,
+xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
                         struct hw_perf_event *event)
 {
         if (XSCALE_PERFCTR_CCNT == event->config_base) {
@@ -367,7 +367,7 @@ static void
 xscale1pmu_start(void)
 {
         unsigned long flags, val;
-        struct cpu_hw_events *events = armpmu->get_hw_events();
+        struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
         raw_spin_lock_irqsave(&events->pmu_lock, flags);
         val = xscale1pmu_read_pmnc();
@@ -380,7 +380,7 @@ static void
 xscale1pmu_stop(void)
 {
         unsigned long flags, val;
-        struct cpu_hw_events *events = armpmu->get_hw_events();
+        struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
         raw_spin_lock_irqsave(&events->pmu_lock, flags);
         val = xscale1pmu_read_pmnc();
@@ -565,7 +565,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 {
         unsigned long pmnc, of_flags;
         struct perf_sample_data data;
-        struct cpu_hw_events *cpuc;
+        struct pmu_hw_events *cpuc;
         struct pt_regs *regs;
         int idx;
 
@@ -586,7 +586,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
         perf_sample_data_init(&data, 0);
 
         cpuc = &__get_cpu_var(cpu_hw_events);
-        for (idx = 0; idx < armpmu->num_events; ++idx) {
+        for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
                 struct perf_event *event = cpuc->events[idx];
                 struct hw_perf_event *hwc;
 
@@ -600,7 +600,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
                         continue;
 
                 if (perf_event_overflow(event, &data, regs))
-                        armpmu->disable(hwc, idx);
+                        cpu_pmu->disable(hwc, idx);
         }
 
         irq_work_run();
@@ -618,7 +618,7 @@ static void
 xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
         unsigned long flags, ien, evtsel;
-        struct cpu_hw_events *events = armpmu->get_hw_events();
+        struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
         ien = xscale2pmu_read_int_enable();
         evtsel = xscale2pmu_read_event_select();
@@ -662,7 +662,7 @@ static void
 xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
         unsigned long flags, ien, evtsel;
-        struct cpu_hw_events *events = armpmu->get_hw_events();
+        struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
         ien = xscale2pmu_read_int_enable();
         evtsel = xscale2pmu_read_event_select();
@@ -703,7 +703,7 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 }
 
 static int
-xscale2pmu_get_event_idx(struct cpu_hw_events *cpuc,
+xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc,
                         struct hw_perf_event *event)
 {
         int idx = xscale1pmu_get_event_idx(cpuc, event);
@@ -722,7 +722,7 @@ static void
 xscale2pmu_start(void)
 {
         unsigned long flags, val;
-        struct cpu_hw_events *events = armpmu->get_hw_events();
+        struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
         raw_spin_lock_irqsave(&events->pmu_lock, flags);
         val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
@@ -735,7 +735,7 @@ static void
 xscale2pmu_stop(void)
 {
         unsigned long flags, val;
-        struct cpu_hw_events *events = armpmu->get_hw_events();
+        struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
         raw_spin_lock_irqsave(&events->pmu_lock, flags);
         val = xscale2pmu_read_pmnc();