Diffstat (limited to 'arch/arm/kernel')

-rw-r--r--  arch/arm/kernel/debug.S              19
-rw-r--r--  arch/arm/kernel/etm.c                 4
-rw-r--r--  arch/arm/kernel/kprobes-decode.c     10
-rw-r--r--  arch/arm/kernel/perf_event.c         33
-rw-r--r--  arch/arm/kernel/perf_event_v6.c       2
-rw-r--r--  arch/arm/kernel/perf_event_v7.c      26
-rw-r--r--  arch/arm/kernel/perf_event_xscale.c   4
-rw-r--r--  arch/arm/kernel/sleep.S              14
-rw-r--r--  arch/arm/kernel/swp_emulate.c         2

9 files changed, 68 insertions, 46 deletions
diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S
index d2d983be096d..bcd66e00bdbe 100644
--- a/arch/arm/kernel/debug.S
+++ b/arch/arm/kernel/debug.S
@@ -25,7 +25,7 @@
 	.macro	addruart, rp, rv
 	.endm
 
-#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
 
 	.macro	senduart, rd, rx
 	mcr	p14, 0, \rd, c0, c5, 0
@@ -49,23 +49,6 @@
 1002:
 	.endm
 
-#elif defined(CONFIG_CPU_V7)
-
-	.macro	senduart, rd, rx
-	mcr	p14, 0, \rd, c0, c5, 0
-	.endm
-
-	.macro	busyuart, rd, rx
-busy:	mrc	p14, 0, pc, c0, c1, 0
-	bcs	busy
-	.endm
-
-	.macro	waituart, rd, rx
-wait:	mrc	p14, 0, pc, c0, c1, 0
-	bcs	wait
-
-	.endm
-
 #elif defined(CONFIG_CPU_XSCALE)
 
 	.macro	senduart, rd, rx
diff --git a/arch/arm/kernel/etm.c b/arch/arm/kernel/etm.c
index 052b509e2d5f..1bec8b5f22f0 100644
--- a/arch/arm/kernel/etm.c
+++ b/arch/arm/kernel/etm.c
@@ -338,7 +338,7 @@ static struct miscdevice etb_miscdev = {
 	.fops = &etb_fops,
 };
 
-static int __init etb_probe(struct amba_device *dev, const struct amba_id *id)
+static int __devinit etb_probe(struct amba_device *dev, const struct amba_id *id)
 {
 	struct tracectx *t = &tracer;
 	int ret = 0;
@@ -530,7 +530,7 @@ static ssize_t trace_mode_store(struct kobject *kobj,
 static struct kobj_attribute trace_mode_attr =
 	__ATTR(trace_mode, 0644, trace_mode_show, trace_mode_store);
 
-static int __init etm_probe(struct amba_device *dev, const struct amba_id *id)
+static int __devinit etm_probe(struct amba_device *dev, const struct amba_id *id)
 {
 	struct tracectx *t = &tracer;
 	int ret = 0;
diff --git a/arch/arm/kernel/kprobes-decode.c b/arch/arm/kernel/kprobes-decode.c
index 8f6ed43861f1..23891317dc4b 100644
--- a/arch/arm/kernel/kprobes-decode.c
+++ b/arch/arm/kernel/kprobes-decode.c
@@ -594,7 +594,8 @@ static void __kprobes emulate_ldr(struct kprobe *p, struct pt_regs *regs)
 	long cpsr = regs->ARM_cpsr;
 
 	fnr.dr = insnslot_llret_3arg_rflags(rnv, 0, rmv, cpsr, i_fn);
-	regs->uregs[rn] = fnr.r0; /* Save Rn in case of writeback. */
+	if (rn != 15)
+		regs->uregs[rn] = fnr.r0; /* Save Rn in case of writeback. */
 	rdv = fnr.r1;
 
 	if (rd == 15) {
@@ -622,10 +623,11 @@ static void __kprobes emulate_str(struct kprobe *p, struct pt_regs *regs)
 	long rdv = (rd == 15) ? iaddr + str_pc_offset : regs->uregs[rd];
 	long rnv = (rn == 15) ? iaddr + 8 : regs->uregs[rn];
 	long rmv = regs->uregs[rm]; /* rm/rmv may be invalid, don't care. */
+	long rnv_wb;
 
-	/* Save Rn in case of writeback. */
-	regs->uregs[rn] =
-		insnslot_3arg_rflags(rnv, rdv, rmv, regs->ARM_cpsr, i_fn);
+	rnv_wb = insnslot_3arg_rflags(rnv, rdv, rmv, regs->ARM_cpsr, i_fn);
+	if (rn != 15)
+		regs->uregs[rn] = rnv_wb; /* Save Rn in case of writeback. */
 }
 
 static void __kprobes emulate_mrrc(struct kprobe *p, struct pt_regs *regs)
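The guards above keep the emulation from writing back into uregs[15], the saved PC slot: when Rn is r15 the "base" value was synthesized from the instruction address, and storing it back would clobber the PC that the kprobes core resumes from. A minimal standalone sketch of that pattern, with hypothetical names (not the kernel's actual decode path):

/*
 * Illustrative sketch of the writeback guard; uregs[15] stands in
 * for the saved PC, which emulation must never overwrite.
 */
#include <stdio.h>

#define PC_REG 15

static unsigned long uregs[16];

static void writeback(int rn, unsigned long new_base)
{
	if (rn != PC_REG)		/* the check added by this patch */
		uregs[rn] = new_base;	/* normal base-register writeback */
	/* rn == 15: leave the saved PC untouched */
}

int main(void)
{
	uregs[PC_REG] = 0x8000;			/* pretend saved PC */
	writeback(PC_REG, 0xdeadbeef);		/* must be a no-op */
	printf("saved pc still 0x%lx\n", uregs[PC_REG]);	/* 0x8000 */
	return 0;
}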
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 22e194eb8536..69cfee0fe00f 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -79,6 +79,7 @@ struct arm_pmu {
 	void (*write_counter)(int idx, u32 val);
 	void (*start)(void);
 	void (*stop)(void);
+	void (*reset)(void *);
 	const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
 				   [PERF_COUNT_HW_CACHE_OP_MAX]
 				   [PERF_COUNT_HW_CACHE_RESULT_MAX];
@@ -204,11 +205,9 @@ armpmu_event_set_period(struct perf_event *event,
 static u64
 armpmu_event_update(struct perf_event *event,
 		    struct hw_perf_event *hwc,
-		    int idx)
+		    int idx, int overflow)
 {
-	int shift = 64 - 32;
-	s64 prev_raw_count, new_raw_count;
-	u64 delta;
+	u64 delta, prev_raw_count, new_raw_count;
 
 again:
 	prev_raw_count = local64_read(&hwc->prev_count);
@@ -218,8 +217,13 @@ again:
 			     new_raw_count) != prev_raw_count)
 		goto again;
 
-	delta = (new_raw_count << shift) - (prev_raw_count << shift);
-	delta >>= shift;
+	new_raw_count &= armpmu->max_period;
+	prev_raw_count &= armpmu->max_period;
+
+	if (overflow)
+		delta = armpmu->max_period - prev_raw_count + new_raw_count;
+	else
+		delta = new_raw_count - prev_raw_count;
 
 	local64_add(delta, &event->count);
 	local64_sub(delta, &hwc->period_left);
@@ -236,7 +240,7 @@ armpmu_read(struct perf_event *event)
 	if (hwc->idx < 0)
 		return;
 
-	armpmu_event_update(event, hwc, hwc->idx);
+	armpmu_event_update(event, hwc, hwc->idx, 0);
 }
 
 static void
@@ -254,7 +258,7 @@ armpmu_stop(struct perf_event *event, int flags)
 	if (!(hwc->state & PERF_HES_STOPPED)) {
 		armpmu->disable(hwc, hwc->idx);
 		barrier(); /* why? */
-		armpmu_event_update(event, hwc, hwc->idx);
+		armpmu_event_update(event, hwc, hwc->idx, 0);
 		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
 	}
 }
@@ -624,6 +628,19 @@ static struct pmu pmu = {
 #include "perf_event_v6.c"
 #include "perf_event_v7.c"
 
+/*
+ * Ensure the PMU has sane values out of reset.
+ * This requires SMP to be available, so exists as a separate initcall.
+ */
+static int __init
+armpmu_reset(void)
+{
+	if (armpmu && armpmu->reset)
+		return on_each_cpu(armpmu->reset, NULL, 1);
+	return 0;
+}
+arch_initcall(armpmu_reset);
+
 static int __init
 init_hw_perf_events(void)
 {
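With the extra overflow argument, armpmu_event_update() no longer relies on 32-bit shift tricks: both counts are masked to max_period, and when the caller reports an overflow the delta wraps through max_period instead of going backwards. A standalone sketch of the new arithmetic with made-up counter values (illustrative, not kernel code):

/* Standalone illustration of the masked, overflow-aware delta. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t max_period = (1LLU << 32) - 1;	/* as in armv7pmu below */
	uint64_t prev = 0xfffffff0, now = 0x10;	/* counter wrapped */
	int overflow = 1;			/* reported by the IRQ handler */
	uint64_t delta;

	prev &= max_period;
	now &= max_period;
	delta = overflow ? max_period - prev + now : now - prev;

	printf("delta = %llu\n", (unsigned long long)delta);	/* 31 */
	return 0;
}

The non-interrupt callers (armpmu_read(), armpmu_stop()) pass 0; the v6, v7, and XScale interrupt handlers changed below pass 1.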
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 6fc2d228db55..f1e8dd94afe8 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -474,7 +474,7 @@ armv6pmu_handle_irq(int irq_num,
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx);
+		armpmu_event_update(event, hwc, idx, 1);
 		data.period = event->hw.last_period;
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 2e1402556fa0..4960686afb58 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -466,6 +466,7 @@ static inline unsigned long armv7_pmnc_read(void)
 static inline void armv7_pmnc_write(unsigned long val)
 {
 	val &= ARMV7_PMNC_MASK;
+	isb();
 	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
 }
 
@@ -502,6 +503,7 @@ static inline int armv7_pmnc_select_counter(unsigned int idx)
 
 	val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK;
 	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
+	isb();
 
 	return idx;
 }
@@ -780,7 +782,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx);
+		armpmu_event_update(event, hwc, idx, 1);
 		data.period = event->hw.last_period;
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
@@ -847,6 +849,18 @@ static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
 	}
 }
 
+static void armv7pmu_reset(void *info)
+{
+	u32 idx, nb_cnt = armpmu->num_events;
+
+	/* The counter and interrupt enable registers are unknown at reset. */
+	for (idx = 1; idx < nb_cnt; ++idx)
+		armv7pmu_disable_event(NULL, idx);
+
+	/* Initialize & Reset PMNC: C and P bits */
+	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
+}
+
 static struct arm_pmu armv7pmu = {
 	.handle_irq = armv7pmu_handle_irq,
 	.enable = armv7pmu_enable_event,
@@ -856,17 +870,15 @@ static struct arm_pmu armv7pmu = {
 	.get_event_idx = armv7pmu_get_event_idx,
 	.start = armv7pmu_start,
 	.stop = armv7pmu_stop,
+	.reset = armv7pmu_reset,
 	.raw_event_mask = 0xFF,
 	.max_period = (1LLU << 32) - 1,
 };
 
-static u32 __init armv7_reset_read_pmnc(void)
+static u32 __init armv7_read_num_pmnc_events(void)
 {
 	u32 nb_cnt;
 
-	/* Initialize & Reset PMNC: C and P bits */
-	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
-
 	/* Read the nb of CNTx counters supported from PMNC */
 	nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
 
@@ -880,7 +892,7 @@ static const struct arm_pmu *__init armv7_a8_pmu_init(void)
 	armv7pmu.name = "ARMv7 Cortex-A8";
 	armv7pmu.cache_map = &armv7_a8_perf_cache_map;
 	armv7pmu.event_map = &armv7_a8_perf_map;
-	armv7pmu.num_events = armv7_reset_read_pmnc();
+	armv7pmu.num_events = armv7_read_num_pmnc_events();
 	return &armv7pmu;
 }
 
@@ -890,7 +902,7 @@ static const struct arm_pmu *__init armv7_a9_pmu_init(void)
 	armv7pmu.name = "ARMv7 Cortex-A9";
 	armv7pmu.cache_map = &armv7_a9_perf_cache_map;
 	armv7pmu.event_map = &armv7_a9_perf_map;
-	armv7pmu.num_events = armv7_reset_read_pmnc();
+	armv7pmu.num_events = armv7_read_num_pmnc_events();
 	return &armv7pmu;
 }
 #else
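The new isb() calls order the CP15 PMU accesses against the processor pipeline, so that instructions around a PMNC write or a counter select are attributed under the intended configuration. For reference, on ARMv7 the barrier expands to roughly the following (paraphrased from the arch headers of this era; treat as an approximation, not the authoritative definition):

/* Approximate ARMv7 form of the instruction synchronization barrier. */
#define isb() __asm__ __volatile__ ("isb" : : : "memory")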
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 28cd3b025bc3..39affbe4fdb2 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -246,7 +246,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx);
+		armpmu_event_update(event, hwc, idx, 1);
 		data.period = event->hw.last_period;
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
@@ -578,7 +578,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx);
+		armpmu_event_update(event, hwc, idx, 1);
 		data.period = event->hw.last_period;
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index bfad698a02e7..6398ead9d1c0 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -119,11 +119,19 @@ ENTRY(cpu_resume)
 #else
 	ldr	r0, sleep_save_sp	@ stack phys addr
 #endif
-	msr	cpsr_c, #PSR_I_BIT | PSR_F_BIT | SVC_MODE @ set SVC, irqs off
+	setmode	PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1  @ set SVC, irqs off
 #ifdef MULTI_CPU
-	ldmia	r0!, {r1, sp, lr, pc}	@ load v:p, stack, return fn, resume fn
+	@ load v:p, stack, return fn, resume fn
+ ARM(	ldmia	r0!, {r1, sp, lr, pc}	)
+THUMB(	ldmia	r0!, {r1, r2, r3, r4}	)
+THUMB(	mov	sp, r2			)
+THUMB(	mov	lr, r3			)
+THUMB(	bx	r4			)
 #else
-	ldmia	r0!, {r1, sp, lr}	@ load v:p, stack, return fn
+	@ load v:p, stack, return fn
+ ARM(	ldmia	r0!, {r1, sp, lr}	)
+THUMB(	ldmia	r0!, {r1, r2, lr}	)
+THUMB(	mov	sp, r2			)
 	b	cpu_do_resume
 #endif
 ENDPROC(cpu_resume)
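The THUMB() paths are split up because a Thumb-2 LDM cannot include sp in its register list the way ARM-state LDM can, so the words are popped into low registers and moved or branched to explicitly; setmode likewise takes a scratch register (r1 here) because Thumb-2 cannot write an immediate to cpsr_c directly. A hypothetical C-level view of the words cpu_resume pops, with field names invented from the comments above (the real layout is defined by the matching suspend-time save code, not by any kernel struct):

/* Hypothetical view of the frame popped from sleep_save_sp. */
struct resume_frame {
	unsigned long v2p_offset;	/* -> r1: virtual:physical offset */
	unsigned long stack;		/* -> sp */
	unsigned long return_fn;	/* -> lr */
	unsigned long resume_fn;	/* -> pc (MULTI_CPU builds only) */
};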
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 7a5760922914..40ee7e5045e4 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -158,7 +158,7 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
 
 	if (res == 0) {
 		/*
-		 * Barrier also required between aquiring a lock for a
+		 * Barrier also required between acquiring a lock for a
 		 * protected resource and accessing the resource. Inserted for
 		 * same reason as above.
 		 */