Diffstat (limited to 'arch/arm/kernel')
 -rw-r--r--  arch/arm/kernel/ecard.c              1
 -rw-r--r--  arch/arm/kernel/perf_event.c        45
 -rw-r--r--  arch/arm/kernel/perf_event_v6.c     22
 -rw-r--r--  arch/arm/kernel/perf_event_v7.c     11
 -rw-r--r--  arch/arm/kernel/perf_event_xscale.c 20
 5 files changed, 64 insertions, 35 deletions
diff --git a/arch/arm/kernel/ecard.c b/arch/arm/kernel/ecard.c
index 4dd0edab6a65..1651d4950744 100644
--- a/arch/arm/kernel/ecard.c
+++ b/arch/arm/kernel/ecard.c
@@ -242,6 +242,7 @@ static void ecard_init_pgtables(struct mm_struct *mm)
 
 	memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE));
 
+	vma.vm_flags = VM_EXEC;
 	vma.vm_mm = mm;
 
 	flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE);
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 5bb91bf3d47f..b2abfa18f137 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -180,7 +180,7 @@ armpmu_event_set_period(struct perf_event *event,
 u64
 armpmu_event_update(struct perf_event *event,
 		    struct hw_perf_event *hwc,
-		    int idx, int overflow)
+		    int idx)
 {
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	u64 delta, prev_raw_count, new_raw_count;
@@ -193,13 +193,7 @@ again:
 		 new_raw_count) != prev_raw_count)
 		goto again;
 
-	new_raw_count &= armpmu->max_period;
-	prev_raw_count &= armpmu->max_period;
-
-	if (overflow)
-		delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
-	else
-		delta = new_raw_count - prev_raw_count;
+	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
 
 	local64_add(delta, &event->count);
 	local64_sub(delta, &hwc->period_left);
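
The rewritten delta calculation is what lets the overflow argument disappear from armpmu_event_update(): with unsigned arithmetic, masking (new - prev) down to the counter width yields the elapsed event count whether or not the counter wrapped in between, so long as it wrapped at most once. A standalone sketch of the identity, assuming a 32-bit counter purely for illustration:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		const uint64_t max_period = 0xffffffff;	/* 32-bit counter mask */
		uint64_t prev_raw = 0xfffffff0;		/* shortly before the wrap */
		uint64_t new_raw  = 0x00000010;		/* shortly after the wrap  */

		/*
		 * Unsigned subtraction wraps modulo 2^64; masking reduces it
		 * modulo the counter width, recovering the true event count
		 * without needing to know whether an overflow occurred.
		 */
		uint64_t delta = (new_raw - prev_raw) & max_period;

		assert(delta == 0x20);	/* 16 events to the wrap, 16 beyond it */
		return 0;
	}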
@@ -216,7 +210,7 @@ armpmu_read(struct perf_event *event)
 	if (hwc->idx < 0)
 		return;
 
-	armpmu_event_update(event, hwc, hwc->idx, 0);
+	armpmu_event_update(event, hwc, hwc->idx);
 }
 
 static void
@@ -232,7 +226,7 @@ armpmu_stop(struct perf_event *event, int flags)
 	if (!(hwc->state & PERF_HES_STOPPED)) {
 		armpmu->disable(hwc, hwc->idx);
 		barrier(); /* why? */
-		armpmu_event_update(event, hwc, hwc->idx, 0);
+		armpmu_event_update(event, hwc, hwc->idx);
 		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
 	}
 }
@@ -518,7 +512,13 @@ __hw_perf_event_init(struct perf_event *event)
 	hwc->config_base |= (unsigned long)mapping;
 
 	if (!hwc->sample_period) {
-		hwc->sample_period = armpmu->max_period;
+		/*
+		 * For non-sampling runs, limit the sample_period to half
+		 * of the counter width. That way, the new counter value
+		 * is far less likely to overtake the previous one unless
+		 * you have some serious IRQ latency issues.
+		 */
+		hwc->sample_period = armpmu->max_period >> 1;
 		hwc->last_period = hwc->sample_period;
 		local64_set(&hwc->period_left, hwc->sample_period);
 	}
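
The comment's caveat about the new count "overtaking" the previous one is the limit of the modular delta above: if interrupt latency lets more than a full counter width of events elapse before the counter is read, the wrap aliases away and those events are silently lost. Seeding the period at max_period >> 1 leaves half the counter width of headroom. A sketch of the aliasing being guarded against, again with a hypothetical 32-bit counter:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		const uint64_t max_period = 0xffffffff;
		uint64_t prev_raw = 0;
		uint64_t elapsed  = (max_period + 1) + 5;	/* a full wrap plus 5 */

		/* The hardware counter only keeps the low 32 bits... */
		uint64_t new_raw = (prev_raw + elapsed) & max_period;

		/* ...so the modular delta reads back 5, not 2^32 + 5. */
		uint64_t delta = (new_raw - prev_raw) & max_period;
		assert(delta == 5);
		return 0;
	}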
@@ -680,6 +680,28 @@ static void __init cpu_pmu_init(struct arm_pmu *armpmu)
 }
 
 /*
+ * PMU hardware loses all context when a CPU goes offline.
+ * When a CPU is hotplugged back in, since some hardware registers are
+ * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
+ * junk values out of them.
+ */
+static int __cpuinit pmu_cpu_notify(struct notifier_block *b,
+				    unsigned long action, void *hcpu)
+{
+	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
+		return NOTIFY_DONE;
+
+	if (cpu_pmu && cpu_pmu->reset)
+		cpu_pmu->reset(NULL);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata pmu_cpu_notifier = {
+	.notifier_call = pmu_cpu_notify,
+};
+
+/*
  * CPU PMU identification and registration.
  */
 static int __init
@@ -730,6 +752,7 @@ init_hw_perf_events(void)
 		pr_info("enabled with %s PMU driver, %d counters available\n",
 			cpu_pmu->name, cpu_pmu->num_events);
 		cpu_pmu_init(cpu_pmu);
+		register_cpu_notifier(&pmu_cpu_notifier);
 		armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
 	} else {
 		pr_info("no hardware support available\n");
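
A side note on the mask test in pmu_cpu_notify() above: the _FROZEN hotplug actions used across suspend/resume are, to my understanding, the base action with CPU_TASKS_FROZEN OR'd in, so stripping that bit makes the single CPU_STARTING comparison cover both paths. A userland restatement with illustrative (not the kernel's authoritative) constant values:

	#include <assert.h>

	#define CPU_TASKS_FROZEN    0x10	/* illustrative values only */
	#define CPU_STARTING        0x0a
	#define CPU_STARTING_FROZEN (CPU_STARTING | CPU_TASKS_FROZEN)

	static int wants_reset(unsigned long action)
	{
		return (action & ~CPU_TASKS_FROZEN) == CPU_STARTING;
	}

	int main(void)
	{
		assert(wants_reset(CPU_STARTING));		/* normal hotplug   */
		assert(wants_reset(CPU_STARTING_FROZEN));	/* resume-time path */
		assert(!wants_reset(CPU_STARTING + 1));		/* any other action */
		return 0;
	}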
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 533be9930ec2..b78af0cc6ef3 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -467,23 +467,6 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static int counter_is_active(unsigned long pmcr, int idx)
-{
-	unsigned long mask = 0;
-	if (idx == ARMV6_CYCLE_COUNTER)
-		mask = ARMV6_PMCR_CCOUNT_IEN;
-	else if (idx == ARMV6_COUNTER0)
-		mask = ARMV6_PMCR_COUNT0_IEN;
-	else if (idx == ARMV6_COUNTER1)
-		mask = ARMV6_PMCR_COUNT1_IEN;
-
-	if (mask)
-		return pmcr & mask;
-
-	WARN_ONCE(1, "invalid counter number (%d)\n", idx);
-	return 0;
-}
-
 static irqreturn_t
 armv6pmu_handle_irq(int irq_num,
 		    void *dev)
@@ -513,7 +496,8 @@ armv6pmu_handle_irq(int irq_num,
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
-		if (!counter_is_active(pmcr, idx))
+		/* Ignore if we don't have an event. */
+		if (!event)
 			continue;
 
 		/*
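
The !event guard here (and repeated for the ARMv7 and XScale handlers below) covers a window the old counter_is_active() test did not: an overflow can be flagged for a counter index that no longer has an event installed, and unconditionally dereferencing cpuc->events[idx] in that case would oops on a NULL pointer.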
@@ -524,7 +508,7 @@ armv6pmu_handle_irq(int irq_num,
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx, 1);
+		armpmu_event_update(event, hwc, idx);
 		data.period = event->hw.last_period;
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 6933244c68f9..4d7095af2ab3 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -809,6 +809,11 @@ static inline int armv7_pmnc_disable_intens(int idx)
 
 	counter = ARMV7_IDX_TO_COUNTER(idx);
 	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
+	isb();
+	/* Clear the overflow flag in case an interrupt is pending. */
+	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
+	isb();
+
 	return idx;
 }
 
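
The rationale for the two extra writes: clearing the interrupt-enable bit stops future overflow interrupts for this counter, but an overflow already latched in the overflow flag status register would stay pending and could fire later against a counter that has since been reassigned; the second mcr acks that flag, and each isb() ensures the preceding write has taken effect before moving on. (Reading c9,c14,2 as PMINTENCLR and c9,c12,3 as PMOVSR follows the ARMv7 PMU register encodings; the diff itself only shows the raw instructions.)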
@@ -955,6 +960,10 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
+		/* Ignore if we don't have an event. */
+		if (!event)
+			continue;
+
 		/*
 		 * We have a single interrupt for all counters. Check that
 		 * each counter has overflowed before we process it.
@@ -963,7 +972,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx, 1);
+		armpmu_event_update(event, hwc, idx);
 		data.period = event->hw.last_period;
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 3b99d8269829..71a21e6712f5 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -255,11 +255,14 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
+		if (!event)
+			continue;
+
 		if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx, 1);
+		armpmu_event_update(event, hwc, idx);
 		data.period = event->hw.last_period;
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
@@ -592,11 +595,14 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
-		if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx))
+		if (!event)
+			continue;
+
+		if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx, 1);
+		armpmu_event_update(event, hwc, idx);
 		data.period = event->hw.last_period;
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
@@ -663,7 +669,7 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
 static void
 xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
-	unsigned long flags, ien, evtsel;
+	unsigned long flags, ien, evtsel, of_flags;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	ien = xscale2pmu_read_int_enable();
@@ -672,26 +678,31 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 	switch (idx) {
 	case XSCALE_CYCLE_COUNTER:
 		ien &= ~XSCALE2_CCOUNT_INT_EN;
+		of_flags = XSCALE2_CCOUNT_OVERFLOW;
 		break;
 	case XSCALE_COUNTER0:
 		ien &= ~XSCALE2_COUNT0_INT_EN;
 		evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
 		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
+		of_flags = XSCALE2_COUNT0_OVERFLOW;
 		break;
 	case XSCALE_COUNTER1:
 		ien &= ~XSCALE2_COUNT1_INT_EN;
 		evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
 		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
+		of_flags = XSCALE2_COUNT1_OVERFLOW;
 		break;
 	case XSCALE_COUNTER2:
 		ien &= ~XSCALE2_COUNT2_INT_EN;
 		evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
 		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
+		of_flags = XSCALE2_COUNT2_OVERFLOW;
 		break;
 	case XSCALE_COUNTER3:
 		ien &= ~XSCALE2_COUNT3_INT_EN;
 		evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
 		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
+		of_flags = XSCALE2_COUNT3_OVERFLOW;
 		break;
 	default:
 		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
@@ -701,6 +712,7 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	xscale2pmu_write_event_select(evtsel);
 	xscale2pmu_write_int_enable(ien);
+	xscale2pmu_write_overflow_flags(of_flags);
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
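This is the XScale 2 counterpart of the ARMv7 overflow handling above: these PMUs report overflow in a dedicated overflow flag status register rather than in PMNC, which is also why the interrupt handler now tests of_flags rather than pmnc. Writing the counter's bit back through xscale2pmu_write_overflow_flags() on teardown acks any overflow still latched, so a stale flag cannot raise a spurious interrupt for a counter with no event attached.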