author		Andrea Bastoni <bastoni@cs.unc.edu>	2010-05-30 19:16:45 -0400
committer	Andrea Bastoni <bastoni@cs.unc.edu>	2010-05-30 19:16:45 -0400
commit		ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree		644b88f8a71896307d71438e9b3af49126ffb22b /arch/arm/kernel
parent		43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent		3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)

Merge branch 'wip-2.6.34' into old-private-master

Diffstat (limited to 'arch/arm/kernel'): 39 files changed, 3546 insertions(+), 686 deletions(-)
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 79087dd6d869..26d302c28e13 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -17,6 +17,9 @@ obj-y := compat.o elf.o entry-armv.o entry-common.o irq.o \
 		   process.o ptrace.o return_address.o setup.o signal.o \
 		   sys_arm.o stacktrace.o time.o traps.o
 
+obj-$(CONFIG_LEDS)		+= leds.o
+obj-$(CONFIG_OC_ETM)		+= etm.o
+
 obj-$(CONFIG_ISA_DMA_API)	+= dma.o
 obj-$(CONFIG_ARCH_ACORN)	+= ecard.o
 obj-$(CONFIG_FIQ)		+= fiq.o
@@ -44,6 +47,8 @@ obj-$(CONFIG_CPU_XSCALE)	+= xscale-cp0.o
 obj-$(CONFIG_CPU_XSC3)		+= xscale-cp0.o
 obj-$(CONFIG_CPU_MOHAWK)	+= xscale-cp0.o
 obj-$(CONFIG_IWMMXT)		+= iwmmxt.o
+obj-$(CONFIG_CPU_HAS_PMU)	+= pmu.o
+obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o
 AFLAGS_iwmmxt.o			:= -Wa,-mcpu=iwmmxt
 
 ifneq ($(CONFIG_ARCH_EBSA110),y)
@@ -52,5 +57,6 @@ endif
 
 head-y			:= head$(MMUEXT).o
 obj-$(CONFIG_DEBUG_LL)	+= debug.o
+obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
 
 extra-y := $(head-y) init_task.o vmlinux.lds
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 0e627705f746..8214bfebfaca 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -48,27 +48,7 @@ extern void __aeabi_uidivmod(void);
 extern void __aeabi_ulcmp(void);
 
 extern void fpundefinstr(void);
-extern void fp_enter(void);
 
-/*
- * This has a special calling convention; it doesn't
- * modify any of the usual registers, except for LR.
- */
-#define EXPORT_CRC_ALIAS(sym) __CRC_SYMBOL(sym, "")
-
-#define EXPORT_SYMBOL_ALIAS(sym,orig)		\
-	EXPORT_CRC_ALIAS(sym)			\
-	static const struct kernel_symbol __ksymtab_##sym	\
-	__used __attribute__((section("__ksymtab"))) =		\
-	{ (unsigned long)&orig, #sym };
-
-/*
- * floating point math emulator support.
- * These symbols will never change their calling convention...
- */
-EXPORT_SYMBOL_ALIAS(kern_fp_enter,fp_enter);
-EXPORT_SYMBOL_ALIAS(fp_printk,printk);
-EXPORT_SYMBOL_ALIAS(fp_send_sig,send_sig);
 
 EXPORT_SYMBOL(__backtrace);
 
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 4a881258bb17..883511522fca 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -12,6 +12,7 @@
  */
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/dma-mapping.h>
 #include <asm/mach/arch.h>
 #include <asm/thread_info.h>
 #include <asm/memory.h>
@@ -112,5 +113,9 @@ int main(void)
 #ifdef MULTI_PABORT
   DEFINE(PROCESSOR_PABT_FUNC,	offsetof(struct processor, _prefetch_abort));
 #endif
+  BLANK();
+  DEFINE(DMA_BIDIRECTIONAL,	DMA_BIDIRECTIONAL);
+  DEFINE(DMA_TO_DEVICE,		DMA_TO_DEVICE);
+  DEFINE(DMA_FROM_DEVICE,	DMA_FROM_DEVICE);
   return 0;
 }
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index 809681900ec8..bd397e0b663e 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -616,15 +616,17 @@ char * __init pcibios_setup(char *str)
  * but we want to try to avoid allocating at 0x2900-0x2bff
  * which might be mirrored at 0x0100-0x03ff..
  */
-void pcibios_align_resource(void *data, struct resource *res,
+resource_size_t pcibios_align_resource(void *data, const struct resource *res,
 				resource_size_t size, resource_size_t align)
 {
 	resource_size_t start = res->start;
 
 	if (res->flags & IORESOURCE_IO && start & 0x300)
 		start = (start + 0x3ff) & ~0x3ff;
 
-	res->start = (start + align - 1) & ~(align - 1);
+	start = (start + align - 1) & ~(align - 1);
+
+	return start;
 }
 
 /**
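The hunk above tracks the 2.6.34 PCI core change: the alignment hook now receives the resource as const and returns its proposal instead of writing it back. A minimal caller-side sketch of that contract, assuming the hook is reached through the resource allocator's alignment callback (the wrapper name align_and_commit() is hypothetical):

    /* Sketch of the post-change contract: the hook only proposes a start
     * value; the caller (normally the resource allocator) commits it.
     * align_and_commit() is a hypothetical illustration, not kernel API. */
    #include <linux/ioport.h>
    #include <linux/pci.h>

    static void align_and_commit(struct resource *res,
    			     resource_size_t size, resource_size_t align)
    {
    	resource_size_t start;

    	/* the hook gets a const view and may no longer mutate *res */
    	start = pcibios_align_resource(NULL, res, size, align);

    	/* the caller applies the aligned value */
    	res->start = start;
    }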
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index fafce1b5c69f..37ae301cc47c 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -91,7 +91,7 @@
 		CALL(sys_settimeofday)
 /* 80 */	CALL(sys_getgroups16)
 		CALL(sys_setgroups16)
-		CALL(OBSOLETE(old_select))	/* used by libc4 */
+		CALL(OBSOLETE(sys_old_select))	/* used by libc4 */
 		CALL(sys_symlink)
 		CALL(sys_ni_syscall)		/* was sys_lstat */
 /* 85 */	CALL(sys_readlink)
@@ -99,7 +99,7 @@
 		CALL(sys_swapon)
 		CALL(sys_reboot)
 		CALL(OBSOLETE(sys_old_readdir))	/* used by libc4 */
-/* 90 */	CALL(OBSOLETE(old_mmap))	/* used by libc4 */
+/* 90 */	CALL(OBSOLETE(sys_old_mmap))	/* used by libc4 */
 		CALL(sys_munmap)
 		CALL(sys_truncate)
 		CALL(sys_ftruncate)
@@ -172,7 +172,7 @@
 /* 160 */	CALL(sys_sched_get_priority_min)
 		CALL(sys_sched_rr_get_interval)
 		CALL(sys_nanosleep)
-		CALL(sys_arm_mremap)
+		CALL(sys_mremap)
 		CALL(sys_setresuid16)
 /* 165 */	CALL(sys_getresuid16)
 		CALL(sys_ni_syscall)		/* vm86 */
@@ -374,6 +374,7 @@
 		CALL(sys_pwritev)
 		CALL(sys_rt_tgsigqueueinfo)
 		CALL(sys_perf_event_open)
+/* 365 */	CALL(sys_recvmmsg)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
diff --git a/arch/arm/kernel/crunch.c b/arch/arm/kernel/crunch.c
index 769abe15cf91..25ef223ba7f3 100644
--- a/arch/arm/kernel/crunch.c
+++ b/arch/arm/kernel/crunch.c
@@ -51,7 +51,7 @@ static int crunch_do(struct notifier_block *self, unsigned long cmd, void *t)
 	 * initialised state information on the first fault.
 	 */
 
-	case THREAD_NOTIFY_RELEASE:
+	case THREAD_NOTIFY_EXIT:
 		crunch_task_release(thread);
 		break;
 
diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S
index b121b6053cce..a38b4879441d 100644
--- a/arch/arm/kernel/debug.S
+++ b/arch/arm/kernel/debug.S
@@ -24,7 +24,7 @@
 
 #if defined(CONFIG_CPU_V6)
 
-		.macro	addruart, rx
+		.macro	addruart, rx, tmp
 		.endm
 
 		.macro	senduart, rd, rx
@@ -49,9 +49,29 @@
 1002:
 		.endm
 
+#elif defined(CONFIG_CPU_V7)
+
+		.macro	addruart, rx, tmp
+		.endm
+
+		.macro	senduart, rd, rx
+		mcr	p14, 0, \rd, c0, c5, 0
+		.endm
+
+		.macro	busyuart, rd, rx
+busy:		mrc	p14, 0, pc, c0, c1, 0
+		bcs	busy
+		.endm
+
+		.macro	waituart, rd, rx
+wait:		mrc	p14, 0, pc, c0, c1, 0
+		bcs	wait
+
+		.endm
+
 #elif defined(CONFIG_CPU_XSCALE)
 
-		.macro	addruart, rx
+		.macro	addruart, rx, tmp
 		.endm
 
 		.macro	senduart, rd, rx
@@ -78,7 +98,7 @@
 
 #else
 
-		.macro	addruart, rx
+		.macro	addruart, rx, tmp
 		.endm
 
 		.macro	senduart, rd, rx
@@ -144,7 +164,7 @@ ENDPROC(printhex2)
 		.ltorg
 
 ENTRY(printascii)
-		addruart r3
+		addruart r3, r1
 		b	2f
 1:		waituart r2, r3
 		senduart r1, r3
@@ -160,7 +180,7 @@ ENTRY(printascii)
 ENDPROC(printascii)
 
 ENTRY(printch)
-		addruart r3
+		addruart r3, r1
 		mov	r1, r0
 		mov	r0, #0
 		b	1b
diff --git a/arch/arm/kernel/dma-isa.c b/arch/arm/kernel/dma-isa.c
index 0e88e46fc732..360bb6d701f5 100644
--- a/arch/arm/kernel/dma-isa.c
+++ b/arch/arm/kernel/dma-isa.c
@@ -207,8 +207,6 @@ void __init isa_init_dma(void)
 		outb(0x32, 0x4d6);
 		outb(0x33, 0x4d6);
 
-		request_dma(DMA_ISA_CASCADE, "cascade");
-
 		for (i = 0; i < ARRAY_SIZE(dma_resources); i++)
 			request_resource(&ioport_resource, dma_resources + i);
 
@@ -218,5 +216,7 @@ void __init isa_init_dma(void)
 				printk(KERN_ERR "ISADMA%u: unable to register: %d\n",
 					chan, ret);
 		}
+
+		request_dma(DMA_ISA_CASCADE, "cascade");
 	}
 }
diff --git a/arch/arm/kernel/early_printk.c b/arch/arm/kernel/early_printk.c
new file mode 100644
index 000000000000..85aa2b292692
--- /dev/null
+++ b/arch/arm/kernel/early_printk.c
@@ -0,0 +1,57 @@
+/*
+ * linux/arch/arm/kernel/early_printk.c
+ *
+ * Copyright (C) 2009 Sascha Hauer <s.hauer@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/console.h>
+#include <linux/init.h>
+
+extern void printch(int);
+
+static void early_write(const char *s, unsigned n)
+{
+	while (n-- > 0) {
+		if (*s == '\n')
+			printch('\r');
+		printch(*s);
+		s++;
+	}
+}
+
+static void early_console_write(struct console *con, const char *s, unsigned n)
+{
+	early_write(s, n);
+}
+
+static struct console early_console = {
+	.name =		"earlycon",
+	.write =	early_console_write,
+	.flags =	CON_PRINTBUFFER | CON_BOOT,
+	.index =	-1,
+};
+
+asmlinkage void early_printk(const char *fmt, ...)
+{
+	char buf[512];
+	int n;
+	va_list ap;
+
+	va_start(ap, fmt);
+	n = vscnprintf(buf, sizeof(buf), fmt, ap);
+	early_write(buf, n);
+	va_end(ap);
+}
+
+static int __init setup_early_printk(char *buf)
+{
+	register_console(&early_console);
+	return 0;
+}
+
+early_param("earlyprintk", setup_early_printk);
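With CONFIG_EARLY_PRINTK enabled and earlyprintk on the kernel command line, the boot console above is registered and early_printk() produces output before the real console driver binds (it ultimately relies on printch() from arch/arm/kernel/debug.S). A minimal sketch of a call site; the function below is hypothetical:

    /* Hypothetical early-boot call site; early_printk() is the function
     * added above and only prints once setup_early_printk() has run. */
    #include <linux/kernel.h>
    #include <linux/init.h>

    static void __init announce_stage(const char *stage, unsigned long val)
    {
    	early_printk("boot: %s (0x%08lx)\n", stage, val);
    }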
diff --git a/arch/arm/kernel/elf.c b/arch/arm/kernel/elf.c
index 950391f194c4..d4a0da1e48f4 100644
--- a/arch/arm/kernel/elf.c
+++ b/arch/arm/kernel/elf.c
@@ -78,15 +78,6 @@ int arm_elf_read_implies_exec(const struct elf32_hdr *x, int executable_stack)
 		return 1;
 	if (cpu_architecture() < CPU_ARCH_ARMv6)
 		return 1;
-#if !defined(CONFIG_AEABI) || defined(CONFIG_OABI_COMPAT)
-	/*
-	 * If we have support for OABI programs, we can never allow NX
-	 * support - our signal syscall restart mechanism relies upon
-	 * being able to execute code placed on the user stack.
-	 */
-	return 1;
-#else
 	return 0;
-#endif
 }
 EXPORT_SYMBOL(arm_elf_read_implies_exec);
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index d2903e3bc861..7ee48e7f8f31 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -523,16 +523,16 @@ ENDPROC(__und_usr)
 /*
  * The out of line fixup for the ldrt above.
  */
-	.section .fixup, "ax"
+	.pushsection .fixup, "ax"
 4:	mov	pc, r9
-	.previous
-	.section __ex_table,"a"
+	.popsection
+	.pushsection __ex_table,"a"
 	.long	1b, 4b
 #if __LINUX_ARM_ARCH__ >= 7
 	.long	2b, 4b
 	.long	3b, 4b
 #endif
-	.previous
+	.popsection
 
 /*
  * Check whether the instruction is a co-processor instruction.
@@ -676,10 +676,10 @@ do_fpe:
  *  lr  = unrecognised FP instruction return address
  */
 
-	.data
+	.pushsection .data
 ENTRY(fp_enter)
 	.word	no_fp
-	.previous
+	.popsection
 
 ENTRY(no_fp)
 	mov	pc, lr
@@ -957,9 +957,7 @@ kuser_cmpxchg_fixup:
 
 #else
 
-#ifdef CONFIG_SMP
-	mcr	p15, 0, r0, c7, c10, 5	@ dmb
-#endif
+	smp_dmb
 1:	ldrex	r3, [r2]
 	subs	r3, r3, r0
 	strexeq	r3, r1, [r2]
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index f0fe95b7085d..2c1db77d7848 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -416,12 +416,12 @@ sys_mmap2:
 		tst	r5, #PGOFF_MASK
 		moveq	r5, r5, lsr #PAGE_SHIFT - 12
 		streq	r5, [sp, #4]
-		beq	do_mmap2
+		beq	sys_mmap_pgoff
 		mov	r0, #-EINVAL
 		mov	pc, lr
 #else
 		str	r5, [sp, #4]
-		b	do_mmap2
+		b	sys_mmap_pgoff
 #endif
 ENDPROC(sys_mmap2)
 
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 7e9ed1eea40a..d93f976fb389 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -102,6 +102,8 @@
 	.else
 	ldmdb	sp, {r0 - lr}^			@ get calling r0 - lr
 	.endif
+	mov	r0, r0				@ ARMv5T and earlier require a nop
+						@ after ldm {}^
 	add	sp, sp, #S_FRAME_SIZE - S_PC
 	movs	pc, lr				@ return & move spsr_svc into cpsr
 	.endm
diff --git a/arch/arm/kernel/etm.c b/arch/arm/kernel/etm.c
new file mode 100644
index 000000000000..827753966301
--- /dev/null
+++ b/arch/arm/kernel/etm.c
@@ -0,0 +1,641 @@
+/*
+ * linux/arch/arm/kernel/etm.c
+ *
+ * Driver for ARM's Embedded Trace Macrocell and Embedded Trace Buffer.
+ *
+ * Copyright (C) 2009 Nokia Corporation.
+ * Alexander Shishkin
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/sysrq.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/amba/bus.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/mutex.h>
+#include <asm/hardware/coresight.h>
+#include <asm/sections.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alexander Shishkin");
+
+static struct tracectx tracer;
+
+static inline bool trace_isrunning(struct tracectx *t)
+{
+	return !!(t->flags & TRACER_RUNNING);
+}
+
+static int etm_setup_address_range(struct tracectx *t, int n,
+		unsigned long start, unsigned long end, int exclude, int data)
+{
+	u32 flags = ETMAAT_ARM | ETMAAT_IGNCONTEXTID | ETMAAT_NSONLY | \
+		    ETMAAT_NOVALCMP;
+
+	if (n < 1 || n > t->ncmppairs)
+		return -EINVAL;
+
+	/* comparators and ranges are numbered starting with 1 as opposed
+	 * to bits in a word */
+	n--;
+
+	if (data)
+		flags |= ETMAAT_DLOADSTORE;
+	else
+		flags |= ETMAAT_IEXEC;
+
+	/* first comparator for the range */
+	etm_writel(t, flags, ETMR_COMP_ACC_TYPE(n * 2));
+	etm_writel(t, start, ETMR_COMP_VAL(n * 2));
+
+	/* second comparator is right next to it */
+	etm_writel(t, flags, ETMR_COMP_ACC_TYPE(n * 2 + 1));
+	etm_writel(t, end, ETMR_COMP_VAL(n * 2 + 1));
+
+	flags = exclude ? ETMTE_INCLEXCL : 0;
+	etm_writel(t, flags | (1 << n), ETMR_TRACEENCTRL);
+
+	return 0;
+}
+
+static int trace_start(struct tracectx *t)
+{
+	u32 v;
+	unsigned long timeout = TRACER_TIMEOUT;
+
+	etb_unlock(t);
+
+	etb_writel(t, 0, ETBR_FORMATTERCTRL);
+	etb_writel(t, 1, ETBR_CTRL);
+
+	etb_lock(t);
+
+	/* configure etm */
+	v = ETMCTRL_OPTS | ETMCTRL_PROGRAM | ETMCTRL_PORTSIZE(t->etm_portsz);
+
+	if (t->flags & TRACER_CYCLE_ACC)
+		v |= ETMCTRL_CYCLEACCURATE;
+
+	etm_unlock(t);
+
+	etm_writel(t, v, ETMR_CTRL);
+
+	while (!(etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
+		;
+	if (!timeout) {
+		dev_dbg(t->dev, "Waiting for progbit to assert timed out\n");
+		etm_lock(t);
+		return -EFAULT;
+	}
+
+	etm_setup_address_range(t, 1, (unsigned long)_stext,
+			(unsigned long)_etext, 0, 0);
+	etm_writel(t, 0, ETMR_TRACEENCTRL2);
+	etm_writel(t, 0, ETMR_TRACESSCTRL);
+	etm_writel(t, 0x6f, ETMR_TRACEENEVT);
+
+	v &= ~ETMCTRL_PROGRAM;
+	v |= ETMCTRL_PORTSEL;
+
+	etm_writel(t, v, ETMR_CTRL);
+
+	timeout = TRACER_TIMEOUT;
+	while (etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM && --timeout)
+		;
+	if (!timeout) {
+		dev_dbg(t->dev, "Waiting for progbit to deassert timed out\n");
+		etm_lock(t);
+		return -EFAULT;
+	}
+
+	etm_lock(t);
+
+	t->flags |= TRACER_RUNNING;
+
+	return 0;
+}
+
+static int trace_stop(struct tracectx *t)
+{
+	unsigned long timeout = TRACER_TIMEOUT;
+
+	etm_unlock(t);
+
+	etm_writel(t, 0x440, ETMR_CTRL);
+	while (!(etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
+		;
+	if (!timeout) {
+		dev_dbg(t->dev, "Waiting for progbit to assert timed out\n");
+		etm_lock(t);
+		return -EFAULT;
+	}
+
+	etm_lock(t);
+
+	etb_unlock(t);
+	etb_writel(t, ETBFF_MANUAL_FLUSH, ETBR_FORMATTERCTRL);
+
+	timeout = TRACER_TIMEOUT;
+	while (etb_readl(t, ETBR_FORMATTERCTRL) &
+			ETBFF_MANUAL_FLUSH && --timeout)
+		;
+	if (!timeout) {
+		dev_dbg(t->dev, "Waiting for formatter flush to commence "
+				"timed out\n");
+		etb_lock(t);
+		return -EFAULT;
+	}
+
+	etb_writel(t, 0, ETBR_CTRL);
+
+	etb_lock(t);
+
+	t->flags &= ~TRACER_RUNNING;
+
+	return 0;
+}
+
+static int etb_getdatalen(struct tracectx *t)
+{
+	u32 v;
+	int rp, wp;
+
+	v = etb_readl(t, ETBR_STATUS);
+
+	if (v & 1)
+		return t->etb_bufsz;
+
+	rp = etb_readl(t, ETBR_READADDR);
+	wp = etb_readl(t, ETBR_WRITEADDR);
+
+	if (rp > wp) {
+		etb_writel(t, 0, ETBR_READADDR);
+		etb_writel(t, 0, ETBR_WRITEADDR);
+
+		return 0;
+	}
+
+	return wp - rp;
+}
+
+/* sysrq+v will always stop the running trace and leave it at that */
+static void etm_dump(void)
+{
+	struct tracectx *t = &tracer;
+	u32 first = 0;
+	int length;
+
+	if (!t->etb_regs) {
+		printk(KERN_INFO "No tracing hardware found\n");
+		return;
+	}
+
+	if (trace_isrunning(t))
+		trace_stop(t);
+
+	etb_unlock(t);
+
+	length = etb_getdatalen(t);
+
+	if (length == t->etb_bufsz)
+		first = etb_readl(t, ETBR_WRITEADDR);
+
+	etb_writel(t, first, ETBR_READADDR);
+
+	printk(KERN_INFO "Trace buffer contents length: %d\n", length);
+	printk(KERN_INFO "--- ETB buffer begin ---\n");
+	for (; length; length--)
+		printk("%08x", cpu_to_be32(etb_readl(t, ETBR_READMEM)));
+	printk(KERN_INFO "\n--- ETB buffer end ---\n");
+
+	/* deassert the overflow bit */
+	etb_writel(t, 1, ETBR_CTRL);
+	etb_writel(t, 0, ETBR_CTRL);
+
+	etb_writel(t, 0, ETBR_TRIGGERCOUNT);
+	etb_writel(t, 0, ETBR_READADDR);
+	etb_writel(t, 0, ETBR_WRITEADDR);
+
+	etb_lock(t);
+}
+
+static void sysrq_etm_dump(int key, struct tty_struct *tty)
+{
+	dev_dbg(tracer.dev, "Dumping ETB buffer\n");
+	etm_dump();
+}
+
+static struct sysrq_key_op sysrq_etm_op = {
+	.handler = sysrq_etm_dump,
+	.help_msg = "ETM buffer dump",
+	.action_msg = "etm",
+};
+
+static int etb_open(struct inode *inode, struct file *file)
+{
+	if (!tracer.etb_regs)
+		return -ENODEV;
+
+	file->private_data = &tracer;
+
+	return nonseekable_open(inode, file);
+}
+
+static ssize_t etb_read(struct file *file, char __user *data,
+		size_t len, loff_t *ppos)
+{
+	int total, i;
+	long length;
+	struct tracectx *t = file->private_data;
+	u32 first = 0;
+	u32 *buf;
+
+	mutex_lock(&t->mutex);
+
+	if (trace_isrunning(t)) {
+		length = 0;
+		goto out;
+	}
+
+	etb_unlock(t);
+
+	total = etb_getdatalen(t);
+	if (total == t->etb_bufsz)
+		first = etb_readl(t, ETBR_WRITEADDR);
+
+	etb_writel(t, first, ETBR_READADDR);
+
+	length = min(total * 4, (int)len);
+	buf = vmalloc(length);
+
+	dev_dbg(t->dev, "ETB buffer length: %d\n", total);
+	dev_dbg(t->dev, "ETB status reg: %x\n", etb_readl(t, ETBR_STATUS));
+	for (i = 0; i < length / 4; i++)
+		buf[i] = etb_readl(t, ETBR_READMEM);
+
+	/* the only way to deassert overflow bit in ETB status is this */
+	etb_writel(t, 1, ETBR_CTRL);
+	etb_writel(t, 0, ETBR_CTRL);
+
+	etb_writel(t, 0, ETBR_WRITEADDR);
+	etb_writel(t, 0, ETBR_READADDR);
+	etb_writel(t, 0, ETBR_TRIGGERCOUNT);
+
+	etb_lock(t);
+
+	length -= copy_to_user(data, buf, length);
+	vfree(buf);
+
+out:
+	mutex_unlock(&t->mutex);
+
+	return length;
+}
+
+static int etb_release(struct inode *inode, struct file *file)
+{
+	/* there's nothing to do here, actually */
+	return 0;
+}
+
+static const struct file_operations etb_fops = {
+	.owner = THIS_MODULE,
+	.read = etb_read,
+	.open = etb_open,
+	.release = etb_release,
+};
+
+static struct miscdevice etb_miscdev = {
+	.name = "tracebuf",
+	.minor = 0,
+	.fops = &etb_fops,
+};
+
+static int __init etb_probe(struct amba_device *dev, struct amba_id *id)
+{
+	struct tracectx *t = &tracer;
+	int ret = 0;
+
+	ret = amba_request_regions(dev, NULL);
+	if (ret)
+		goto out;
+
+	t->etb_regs = ioremap_nocache(dev->res.start, resource_size(&dev->res));
+	if (!t->etb_regs) {
+		ret = -ENOMEM;
+		goto out_release;
+	}
+
+	amba_set_drvdata(dev, t);
+
+	etb_miscdev.parent = &dev->dev;
+
+	ret = misc_register(&etb_miscdev);
+	if (ret)
+		goto out_unmap;
+
+	t->emu_clk = clk_get(&dev->dev, "emu_src_ck");
+	if (IS_ERR(t->emu_clk)) {
+		dev_dbg(&dev->dev, "Failed to obtain emu_src_ck.\n");
+		return -EFAULT;
+	}
+
+	clk_enable(t->emu_clk);
+
+	etb_unlock(t);
+	t->etb_bufsz = etb_readl(t, ETBR_DEPTH);
+	dev_dbg(&dev->dev, "Size: %x\n", t->etb_bufsz);
+
+	/* make sure trace capture is disabled */
+	etb_writel(t, 0, ETBR_CTRL);
+	etb_writel(t, 0x1000, ETBR_FORMATTERCTRL);
+	etb_lock(t);
+
+	dev_dbg(&dev->dev, "ETB AMBA driver initialized.\n");
+
+out:
+	return ret;
+
+out_unmap:
+	amba_set_drvdata(dev, NULL);
+	iounmap(t->etb_regs);
+
+out_release:
+	amba_release_regions(dev);
+
+	return ret;
+}
+
+static int etb_remove(struct amba_device *dev)
+{
+	struct tracectx *t = amba_get_drvdata(dev);
+
+	amba_set_drvdata(dev, NULL);
+
+	iounmap(t->etb_regs);
+	t->etb_regs = NULL;
+
+	clk_disable(t->emu_clk);
+	clk_put(t->emu_clk);
+
+	amba_release_regions(dev);
+
+	return 0;
+}
+
+static struct amba_id etb_ids[] = {
+	{
+		.id	= 0x0003b907,
+		.mask	= 0x0007ffff,
+	},
+	{ 0, 0 },
+};
+
+static struct amba_driver etb_driver = {
+	.drv		= {
+		.name	= "etb",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= etb_probe,
+	.remove		= etb_remove,
+	.id_table	= etb_ids,
+};
+
+/* use a sysfs file "trace_running" to start/stop tracing */
+static ssize_t trace_running_show(struct kobject *kobj,
+				  struct kobj_attribute *attr,
+				  char *buf)
+{
+	return sprintf(buf, "%x\n", trace_isrunning(&tracer));
+}
+
+static ssize_t trace_running_store(struct kobject *kobj,
+				   struct kobj_attribute *attr,
+				   const char *buf, size_t n)
+{
+	unsigned int value;
+	int ret;
+
+	if (sscanf(buf, "%u", &value) != 1)
+		return -EINVAL;
+
+	mutex_lock(&tracer.mutex);
+	ret = value ? trace_start(&tracer) : trace_stop(&tracer);
+	mutex_unlock(&tracer.mutex);
+
+	return ret ? : n;
+}
+
+static struct kobj_attribute trace_running_attr =
+	__ATTR(trace_running, 0644, trace_running_show, trace_running_store);
+
+static ssize_t trace_info_show(struct kobject *kobj,
+				  struct kobj_attribute *attr,
+				  char *buf)
+{
+	u32 etb_wa, etb_ra, etb_st, etb_fc, etm_ctrl, etm_st;
+	int datalen;
+
+	etb_unlock(&tracer);
+	datalen = etb_getdatalen(&tracer);
+	etb_wa = etb_readl(&tracer, ETBR_WRITEADDR);
+	etb_ra = etb_readl(&tracer, ETBR_READADDR);
+	etb_st = etb_readl(&tracer, ETBR_STATUS);
+	etb_fc = etb_readl(&tracer, ETBR_FORMATTERCTRL);
+	etb_lock(&tracer);
+
+	etm_unlock(&tracer);
+	etm_ctrl = etm_readl(&tracer, ETMR_CTRL);
+	etm_st = etm_readl(&tracer, ETMR_STATUS);
+	etm_lock(&tracer);
+
+	return sprintf(buf, "Trace buffer len: %d\nComparator pairs: %d\n"
+			"ETBR_WRITEADDR:\t%08x\n"
+			"ETBR_READADDR:\t%08x\n"
+			"ETBR_STATUS:\t%08x\n"
+			"ETBR_FORMATTERCTRL:\t%08x\n"
+			"ETMR_CTRL:\t%08x\n"
+			"ETMR_STATUS:\t%08x\n",
+			datalen,
+			tracer.ncmppairs,
+			etb_wa,
+			etb_ra,
+			etb_st,
+			etb_fc,
+			etm_ctrl,
+			etm_st
+			);
+}
+
+static struct kobj_attribute trace_info_attr =
+	__ATTR(trace_info, 0444, trace_info_show, NULL);
+
+static ssize_t trace_mode_show(struct kobject *kobj,
+				  struct kobj_attribute *attr,
+				  char *buf)
+{
+	return sprintf(buf, "%d %d\n",
+			!!(tracer.flags & TRACER_CYCLE_ACC),
+			tracer.etm_portsz);
+}
+
+static ssize_t trace_mode_store(struct kobject *kobj,
+				   struct kobj_attribute *attr,
+				   const char *buf, size_t n)
+{
+	unsigned int cycacc, portsz;
+
+	if (sscanf(buf, "%u %u", &cycacc, &portsz) != 2)
+		return -EINVAL;
+
+	mutex_lock(&tracer.mutex);
+	if (cycacc)
+		tracer.flags |= TRACER_CYCLE_ACC;
+	else
+		tracer.flags &= ~TRACER_CYCLE_ACC;
+
+	tracer.etm_portsz = portsz & 0x0f;
+	mutex_unlock(&tracer.mutex);
+
+	return n;
+}
+
+static struct kobj_attribute trace_mode_attr =
+	__ATTR(trace_mode, 0644, trace_mode_show, trace_mode_store);
+
+static int __init etm_probe(struct amba_device *dev, struct amba_id *id)
+{
+	struct tracectx *t = &tracer;
+	int ret = 0;
+
+	if (t->etm_regs) {
+		dev_dbg(&dev->dev, "ETM already initialized\n");
+		ret = -EBUSY;
+		goto out;
+	}
+
+	ret = amba_request_regions(dev, NULL);
+	if (ret)
+		goto out;
+
+	t->etm_regs = ioremap_nocache(dev->res.start, resource_size(&dev->res));
+	if (!t->etm_regs) {
+		ret = -ENOMEM;
+		goto out_release;
+	}
+
+	amba_set_drvdata(dev, t);
+
+	mutex_init(&t->mutex);
+	t->dev = &dev->dev;
+	t->flags = TRACER_CYCLE_ACC;
+	t->etm_portsz = 1;
+
+	etm_unlock(t);
+	ret = etm_readl(t, CSCR_PRSR);
+
+	t->ncmppairs = etm_readl(t, ETMR_CONFCODE) & 0xf;
+	etm_writel(t, 0x440, ETMR_CTRL);
+	etm_lock(t);
+
+	ret = sysfs_create_file(&dev->dev.kobj,
+			&trace_running_attr.attr);
+	if (ret)
+		goto out_unmap;
+
+	/* failing to create any of these two is not fatal */
+	ret = sysfs_create_file(&dev->dev.kobj, &trace_info_attr.attr);
+	if (ret)
+		dev_dbg(&dev->dev, "Failed to create trace_info in sysfs\n");
+
+	ret = sysfs_create_file(&dev->dev.kobj, &trace_mode_attr.attr);
+	if (ret)
+		dev_dbg(&dev->dev, "Failed to create trace_mode in sysfs\n");
+
+	dev_dbg(t->dev, "ETM AMBA driver initialized.\n");
+
+out:
+	return ret;
+
+out_unmap:
+	amba_set_drvdata(dev, NULL);
+	iounmap(t->etm_regs);
+
+out_release:
+	amba_release_regions(dev);
+
+	return ret;
+}
+
+static int etm_remove(struct amba_device *dev)
+{
+	struct tracectx *t = amba_get_drvdata(dev);
+
+	amba_set_drvdata(dev, NULL);
+
+	iounmap(t->etm_regs);
+	t->etm_regs = NULL;
+
+	amba_release_regions(dev);
+
+	sysfs_remove_file(&dev->dev.kobj, &trace_running_attr.attr);
+	sysfs_remove_file(&dev->dev.kobj, &trace_info_attr.attr);
+	sysfs_remove_file(&dev->dev.kobj, &trace_mode_attr.attr);
+
+	return 0;
+}
+
+static struct amba_id etm_ids[] = {
+	{
+		.id	= 0x0003b921,
+		.mask	= 0x0007ffff,
+	},
+	{ 0, 0 },
+};
+
+static struct amba_driver etm_driver = {
+	.drv		= {
+		.name	= "etm",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= etm_probe,
+	.remove		= etm_remove,
+	.id_table	= etm_ids,
+};
+
+static int __init etm_init(void)
+{
+	int retval;
+
+	retval = amba_driver_register(&etb_driver);
+	if (retval) {
+		printk(KERN_ERR "Failed to register etb\n");
+		return retval;
+	}
+
+	retval = amba_driver_register(&etm_driver);
+	if (retval) {
+		amba_driver_unregister(&etb_driver);
+		printk(KERN_ERR "Failed to probe etm\n");
+		return retval;
+	}
+
+	/* not being able to install this handler is not fatal */
+	(void)register_sysrq_key('v', &sysrq_etm_op);
+
+	return 0;
+}
+
+device_initcall(etm_init);
+
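The driver above exposes control through sysfs attributes (trace_running, trace_mode, trace_info) on the ETM device and the captured buffer through the "tracebuf" misc device. A hedged userspace sketch of one capture cycle; the sysfs path is illustrative only, since the real one depends on where the AMBA device is bound:

    /* Userspace sketch: start a trace, stop it, drain /dev/tracebuf.
     * Error handling trimmed; the sysfs path below is a placeholder. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
    	char buf[4096];
    	ssize_t n;
    	int ctl, trc;

    	ctl = open("/sys/devices/.../trace_running", O_WRONLY);
    	write(ctl, "1", 1);	/* trace_running_store() -> trace_start() */
    	sleep(1);		/* let the ETB capture for a while */
    	write(ctl, "0", 1);	/* trace_running_store() -> trace_stop() */
    	close(ctl);

    	trc = open("/dev/tracebuf", O_RDONLY);	/* etb_miscdev above */
    	while ((n = read(trc, buf, sizeof(buf))) > 0)
    		fwrite(buf, 1, n, stdout);	/* raw ETB words */
    	close(trc);
    	return 0;
    }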
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index c63842766229..0298286ad4ad 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -62,15 +62,15 @@ int ftrace_modify_code(unsigned long pc, unsigned char *old_code,
 		"	movne	%0, #2		\n"
 		"3:\n"
 
-		".section .fixup, \"ax\"\n"
+		".pushsection .fixup, \"ax\"\n"
 		"4:	mov	%0, #1		\n"
 		"	b	3b		\n"
-		".previous\n"
+		".popsection\n"
 
-		".section __ex_table, \"a\"\n"
+		".pushsection __ex_table, \"a\"\n"
 		"	.long	1b, 4b		\n"
 		"	.long	2b, 4b		\n"
-		".previous\n"
+		".popsection\n"
 
 		: "=r"(err), "=r"(replaced)
 		: "r"(pc), "r"(new), "r"(old), "0"(err), "1"(replaced)
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index e5dfc2895e24..573b803dc6bf 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -32,7 +32,7 @@
  * numbers for r1.
  *
  */
-	.section ".text.head", "ax"
+	__HEAD
 ENTRY(stext)
 	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
 						@ and irqs disabled
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 38ccbe1d3b2c..eb62bf947212 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -74,7 +74,7 @@
  * crap here - that's what the boot loader (or in extreme, well justified
  * circumstances, zImage) is for.
  */
-	.section ".text.head", "ax"
+	__HEAD
ENTRY(stext)
 	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
 						@ and irqs disabled
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index c9a8619f3856..3b3d2c80509c 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -27,7 +27,6 @@
 #include <linux/ioport.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
-#include <linux/slab.h>
 #include <linux/random.h>
 #include <linux/smp.h>
 #include <linux/init.h>
@@ -69,7 +68,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	}
 
 	if (i < NR_IRQS) {
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
 		action = irq_desc[i].action;
 		if (!action)
 			goto unlock;
@@ -84,7 +83,7 @@ int show_interrupts(struct seq_file *p, void *v)
 
 		seq_putc(p, '\n');
 unlock:
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	} else if (i == NR_IRQS) {
 #ifdef CONFIG_FIQ
 		show_fiq_list(p, v);
@@ -139,7 +138,7 @@ void set_irq_flags(unsigned int irq, unsigned int iflags)
 	}
 
 	desc = irq_desc + irq;
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
 	if (iflags & IRQF_VALID)
 		desc->status &= ~IRQ_NOREQUEST;
@@ -147,7 +146,7 @@ void set_irq_flags(unsigned int irq, unsigned int iflags)
 		desc->status &= ~IRQ_NOPROBE;
 	if (!(iflags & IRQF_NOAUTOEN))
 		desc->status &= ~IRQ_NOAUTOEN;
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 
 void __init init_IRQ(void)
@@ -166,9 +165,9 @@ static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
 {
 	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->node, cpu);
 
-	spin_lock_irq(&desc->lock);
+	raw_spin_lock_irq(&desc->lock);
 	desc->chip->set_affinity(irq, cpumask_of(cpu));
-	spin_unlock_irq(&desc->lock);
+	raw_spin_unlock_irq(&desc->lock);
 }
 
 /*
diff --git a/arch/arm/kernel/isa.c b/arch/arm/kernel/isa.c
index 8ac9b8424007..346485910732 100644
--- a/arch/arm/kernel/isa.c
+++ b/arch/arm/kernel/isa.c
@@ -22,47 +22,42 @@ static unsigned int isa_membase, isa_portbase, isa_portshift;
 
 static ctl_table ctl_isa_vars[4] = {
 	{
-		.ctl_name	= BUS_ISA_MEM_BASE,
 		.procname	= "membase",
 		.data		= &isa_membase,
 		.maxlen		= sizeof(isa_membase),
 		.mode		= 0444,
-		.proc_handler	= &proc_dointvec,
+		.proc_handler	= proc_dointvec,
 	}, {
-		.ctl_name	= BUS_ISA_PORT_BASE,
 		.procname	= "portbase",
 		.data		= &isa_portbase,
 		.maxlen		= sizeof(isa_portbase),
 		.mode		= 0444,
-		.proc_handler	= &proc_dointvec,
+		.proc_handler	= proc_dointvec,
 	}, {
-		.ctl_name	= BUS_ISA_PORT_SHIFT,
 		.procname	= "portshift",
 		.data		= &isa_portshift,
 		.maxlen		= sizeof(isa_portshift),
 		.mode		= 0444,
-		.proc_handler	= &proc_dointvec,
-	}, {0}
+		.proc_handler	= proc_dointvec,
+	}, {}
 };
 
 static struct ctl_table_header *isa_sysctl_header;
 
 static ctl_table ctl_isa[2] = {
 	{
-		.ctl_name	= CTL_BUS_ISA,
 		.procname	= "isa",
 		.mode		= 0555,
 		.child		= ctl_isa_vars,
-	}, {0}
+	}, {}
 };
 
 static ctl_table ctl_bus[2] = {
 	{
-		.ctl_name	= CTL_BUS,
 		.procname	= "bus",
 		.mode		= 0555,
 		.child		= ctl_isa,
-	}, {0}
+	}, {}
 };
 
 void __init
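This hunk tracks the sysctl cleanup merged in this window: binary ctl_name identifiers are gone, tables end with an empty entry, and proc_handler is assigned directly. A minimal sketch of the resulting pattern, with hypothetical names:

    /* Sketch of a post-cleanup ctl_table; "example" and example_value
     * are hypothetical, the structure mirrors the isa.c tables above. */
    #include <linux/sysctl.h>

    static unsigned int example_value;	/* hypothetical knob */

    static ctl_table example_vars[] = {
    	{
    		.procname	= "example",
    		.data		= &example_value,
    		.maxlen		= sizeof(example_value),
    		.mode		= 0444,
    		.proc_handler	= proc_dointvec,	/* no '&' needed */
    	},
    	{}	/* empty entry terminates; no .ctl_name anywhere */
    };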
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index ba8ccfede964..a5b846b9895d 100644
--- a/arch/arm/kernel/kgdb.c
+++ b/arch/arm/kernel/kgdb.c
@@ -9,6 +9,7 @@
  * Authors:  George Davis <davis_g@mvista.com>
  *           Deepak Saxena <dsaxena@plexity.net>
  */
+#include <linux/irq.h>
 #include <linux/kgdb.h>
 #include <asm/traps.h>
 
@@ -158,6 +159,18 @@ static struct undef_hook kgdb_compiled_brkpt_hook = {
 	.fn			= kgdb_compiled_brk_fn
 };
 
+static void kgdb_call_nmi_hook(void *ignored)
+{
+	kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
+}
+
+void kgdb_roundup_cpus(unsigned long flags)
+{
+	local_irq_enable();
+	smp_call_function(kgdb_call_nmi_hook, NULL, 0);
+	local_irq_disable();
+}
+
 /**
  *	kgdb_arch_init - Perform any architecture specific initalization.
  *
diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c
index 60c62c377fa9..2ba7deb3072e 100644
--- a/arch/arm/kernel/kprobes.c
+++ b/arch/arm/kernel/kprobes.c
@@ -22,6 +22,7 @@
 #include <linux/kernel.h>
 #include <linux/kprobes.h>
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <linux/stop_machine.h>
 #include <linux/stringify.h>
 #include <asm/traps.h>
@@ -393,6 +394,14 @@ void __kprobes jprobe_return(void)
 		/*
 		 * Setup an empty pt_regs. Fill SP and PC fields as
 		 * they're needed by longjmp_break_handler.
+		 *
+		 * We allocate some slack between the original SP and start of
+		 * our fabricated regs. To be precise we want to have worst case
+		 * covered which is STMFD with all 16 regs so we allocate 2 *
+		 * sizeof(struct_pt_regs)).
+		 *
+		 * This is to prevent any simulated instruction from writing
+		 * over the regs when they are accessing the stack.
 		 */
 		"sub    sp, %0, %1		\n\t"
 		"ldr    r0, ="__stringify(JPROBE_MAGIC_ADDR)"\n\t"
@@ -410,7 +419,7 @@ void __kprobes jprobe_return(void)
 		"ldmia	sp, {r0 - pc}		\n\t"
 		:
 		: "r" (kcb->jprobe_saved_regs.ARM_sp),
-		  "I" (sizeof(struct pt_regs)),
+		  "I" (sizeof(struct pt_regs) * 2),
 		  "J" (offsetof(struct pt_regs, ARM_sp)),
 		  "J" (offsetof(struct pt_regs, ARM_pc)),
 		  "J" (offsetof(struct pt_regs, ARM_cpsr))
diff --git a/arch/arm/kernel/leds.c b/arch/arm/kernel/leds.c
new file mode 100644
index 000000000000..31a316c1777b
--- /dev/null
+++ b/arch/arm/kernel/leds.c
@@ -0,0 +1,115 @@
+/*
+ * LED support code, ripped out of arch/arm/kernel/time.c
+ *
+ * Copyright (C) 1994-2001 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sysdev.h>
+
+#include <asm/leds.h>
+
+static void dummy_leds_event(led_event_t evt)
+{
+}
+
+void (*leds_event)(led_event_t) = dummy_leds_event;
+
+struct leds_evt_name {
+	const char	name[8];
+	int		on;
+	int		off;
+};
+
+static const struct leds_evt_name evt_names[] = {
+	{ "amber", led_amber_on, led_amber_off },
+	{ "blue",  led_blue_on,  led_blue_off  },
+	{ "green", led_green_on, led_green_off },
+	{ "red",   led_red_on,   led_red_off   },
+};
+
+static ssize_t leds_store(struct sys_device *dev,
+			struct sysdev_attribute *attr,
+			const char *buf, size_t size)
+{
+	int ret = -EINVAL, len = strcspn(buf, " ");
+
+	if (len > 0 && buf[len] == '\0')
+		len--;
+
+	if (strncmp(buf, "claim", len) == 0) {
+		leds_event(led_claim);
+		ret = size;
+	} else if (strncmp(buf, "release", len) == 0) {
+		leds_event(led_release);
+		ret = size;
+	} else {
+		int i;
+
+		for (i = 0; i < ARRAY_SIZE(evt_names); i++) {
+			if (strlen(evt_names[i].name) != len ||
+			    strncmp(buf, evt_names[i].name, len) != 0)
+				continue;
+			if (strncmp(buf+len, " on", 3) == 0) {
+				leds_event(evt_names[i].on);
+				ret = size;
+			} else if (strncmp(buf+len, " off", 4) == 0) {
+				leds_event(evt_names[i].off);
+				ret = size;
+			}
+			break;
+		}
+	}
+	return ret;
+}
+
+static SYSDEV_ATTR(event, 0200, NULL, leds_store);
+
+static int leds_suspend(struct sys_device *dev, pm_message_t state)
+{
+	leds_event(led_stop);
+	return 0;
+}
+
+static int leds_resume(struct sys_device *dev)
+{
+	leds_event(led_start);
+	return 0;
+}
+
+static int leds_shutdown(struct sys_device *dev)
+{
+	leds_event(led_halted);
+	return 0;
+}
+
+static struct sysdev_class leds_sysclass = {
+	.name		= "leds",
+	.shutdown	= leds_shutdown,
+	.suspend	= leds_suspend,
+	.resume		= leds_resume,
+};
+
+static struct sys_device leds_device = {
+	.id		= 0,
+	.cls		= &leds_sysclass,
+};
+
+static int __init leds_init(void)
+{
+	int ret;
+	ret = sysdev_class_register(&leds_sysclass);
+	if (ret == 0)
+		ret = sysdev_register(&leds_device);
+	if (ret == 0)
+		ret = sysdev_create_file(&leds_device, &attr_event);
+	return ret;
+}
+
+device_initcall(leds_init);
+
+EXPORT_SYMBOL(leds_event);
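The "event" attribute registered above accepts textual LED events. A hedged userspace sketch; the path is derived from the sysdev class name "leds" and device id 0 registered above, so verify it on the target:

    /* Userspace sketch: claim the LEDs, then turn the green one on via
     * the "event" attribute created by leds_init(). Each event must be
     * a separate write(2) so leds_store() parses them individually. */
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
    	int fd = open("/sys/devices/system/leds/leds0/event", O_WRONLY);

    	if (fd < 0)
    		return 1;
    	write(fd, "claim", 5);		/* leds_store() -> led_claim */
    	write(fd, "green on", 8);	/* leds_store() -> led_green_on */
    	close(fd);
    	return 0;
    }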
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index f28c5e9c51ea..c628bdf6c430 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -16,9 +16,9 @@
 #include <linux/mm.h>
 #include <linux/elf.h>
 #include <linux/vmalloc.h>
-#include <linux/slab.h>
 #include <linux/fs.h>
 #include <linux/string.h>
+#include <linux/gfp.h>
 
 #include <asm/pgtable.h>
 #include <asm/sections.h>
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c new file mode 100644 index 000000000000..9e70f2053f9a --- /dev/null +++ b/arch/arm/kernel/perf_event.c | |||
@@ -0,0 +1,2277 @@ | |||
1 | #undef DEBUG | ||
2 | |||
3 | /* | ||
4 | * ARM performance counter support. | ||
5 | * | ||
6 | * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles | ||
7 | * | ||
8 | * ARMv7 support: Jean Pihet <jpihet@mvista.com> | ||
9 | * 2010 (c) MontaVista Software, LLC. | ||
10 | * | ||
11 | * This code is based on the sparc64 perf event code, which is in turn based | ||
12 | * on the x86 code. Callchain code is based on the ARM OProfile backtrace | ||
13 | * code. | ||
14 | */ | ||
15 | #define pr_fmt(fmt) "hw perfevents: " fmt | ||
16 | |||
17 | #include <linux/interrupt.h> | ||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/perf_event.h> | ||
20 | #include <linux/spinlock.h> | ||
21 | #include <linux/uaccess.h> | ||
22 | |||
23 | #include <asm/cputype.h> | ||
24 | #include <asm/irq.h> | ||
25 | #include <asm/irq_regs.h> | ||
26 | #include <asm/pmu.h> | ||
27 | #include <asm/stacktrace.h> | ||
28 | |||
29 | static const struct pmu_irqs *pmu_irqs; | ||
30 | |||
31 | /* | ||
32 | * Hardware lock to serialize accesses to PMU registers. Needed for the | ||
33 | * read/modify/write sequences. | ||
34 | */ | ||
35 | DEFINE_SPINLOCK(pmu_lock); | ||
36 | |||
37 | /* | ||
38 | * ARMv6 supports a maximum of 3 events, starting from index 1. If we add | ||
39 | * another platform that supports more, we need to increase this to be the | ||
40 | * largest of all platforms. | ||
41 | * | ||
42 | * ARMv7 supports up to 32 events: | ||
43 | * cycle counter CCNT + 31 events counters CNT0..30. | ||
44 | * Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters. | ||
45 | */ | ||
46 | #define ARMPMU_MAX_HWEVENTS 33 | ||
47 | |||
48 | /* The events for a given CPU. */ | ||
49 | struct cpu_hw_events { | ||
50 | /* | ||
51 | * The events that are active on the CPU for the given index. Index 0 | ||
52 | * is reserved. | ||
53 | */ | ||
54 | struct perf_event *events[ARMPMU_MAX_HWEVENTS]; | ||
55 | |||
56 | /* | ||
57 | * A 1 bit for an index indicates that the counter is being used for | ||
58 | * an event. A 0 means that the counter can be used. | ||
59 | */ | ||
60 | unsigned long used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)]; | ||
61 | |||
62 | /* | ||
63 | * A 1 bit for an index indicates that the counter is actively being | ||
64 | * used. | ||
65 | */ | ||
66 | unsigned long active_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)]; | ||
67 | }; | ||
68 | DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); | ||
69 | |||
70 | struct arm_pmu { | ||
71 | char *name; | ||
72 | irqreturn_t (*handle_irq)(int irq_num, void *dev); | ||
73 | void (*enable)(struct hw_perf_event *evt, int idx); | ||
74 | void (*disable)(struct hw_perf_event *evt, int idx); | ||
75 | int (*event_map)(int evt); | ||
76 | u64 (*raw_event)(u64); | ||
77 | int (*get_event_idx)(struct cpu_hw_events *cpuc, | ||
78 | struct hw_perf_event *hwc); | ||
79 | u32 (*read_counter)(int idx); | ||
80 | void (*write_counter)(int idx, u32 val); | ||
81 | void (*start)(void); | ||
82 | void (*stop)(void); | ||
83 | int num_events; | ||
84 | u64 max_period; | ||
85 | }; | ||
86 | |||
87 | /* Set at runtime when we know what CPU type we are. */ | ||
88 | static const struct arm_pmu *armpmu; | ||
89 | |||
90 | #define HW_OP_UNSUPPORTED 0xFFFF | ||
91 | |||
92 | #define C(_x) \ | ||
93 | PERF_COUNT_HW_CACHE_##_x | ||
94 | |||
95 | #define CACHE_OP_UNSUPPORTED 0xFFFF | ||
96 | |||
97 | static unsigned armpmu_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | ||
98 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
99 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; | ||
100 | |||
101 | static int | ||
102 | armpmu_map_cache_event(u64 config) | ||
103 | { | ||
104 | unsigned int cache_type, cache_op, cache_result, ret; | ||
105 | |||
106 | cache_type = (config >> 0) & 0xff; | ||
107 | if (cache_type >= PERF_COUNT_HW_CACHE_MAX) | ||
108 | return -EINVAL; | ||
109 | |||
110 | cache_op = (config >> 8) & 0xff; | ||
111 | if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) | ||
112 | return -EINVAL; | ||
113 | |||
114 | cache_result = (config >> 16) & 0xff; | ||
115 | if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) | ||
116 | return -EINVAL; | ||
117 | |||
118 | ret = (int)armpmu_perf_cache_map[cache_type][cache_op][cache_result]; | ||
119 | |||
120 | if (ret == CACHE_OP_UNSUPPORTED) | ||
121 | return -ENOENT; | ||
122 | |||
123 | return ret; | ||
124 | } | ||
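The decode above is the inverse of the generic perf ABI packing for PERF_TYPE_HW_CACHE: the cache type lives in the low byte of attr.config, the operation in the next byte, and the result in the byte above that. A minimal user-space sketch of composing such a value, using the standard enumerators from linux/perf_event.h:

#include <stdio.h>
#include <linux/perf_event.h>

int main(void)
{
	/* L1D read misses: cache type 0x0, op 0x0, result 0x1. */
	unsigned long long config =
		PERF_COUNT_HW_CACHE_L1D |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_MISS << 16);

	printf("attr.config = %#llx\n", config);	/* prints 0x10000 */
	return 0;
}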
125 | |||
126 | static int | ||
127 | armpmu_event_set_period(struct perf_event *event, | ||
128 | struct hw_perf_event *hwc, | ||
129 | int idx) | ||
130 | { | ||
131 | s64 left = atomic64_read(&hwc->period_left); | ||
132 | s64 period = hwc->sample_period; | ||
133 | int ret = 0; | ||
134 | |||
135 | if (unlikely(left <= -period)) { | ||
136 | left = period; | ||
137 | atomic64_set(&hwc->period_left, left); | ||
138 | hwc->last_period = period; | ||
139 | ret = 1; | ||
140 | } | ||
141 | |||
142 | if (unlikely(left <= 0)) { | ||
143 | left += period; | ||
144 | atomic64_set(&hwc->period_left, left); | ||
145 | hwc->last_period = period; | ||
146 | ret = 1; | ||
147 | } | ||
148 | |||
149 | if (left > (s64)armpmu->max_period) | ||
150 | left = armpmu->max_period; | ||
151 | |||
152 | atomic64_set(&hwc->prev_count, (u64)-left); | ||
153 | |||
154 | armpmu->write_counter(idx, (u64)(-left) & 0xffffffff); | ||
155 | |||
156 | perf_event_update_userpage(event); | ||
157 | |||
158 | return ret; | ||
159 | } | ||
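The trick in armpmu_event_set_period() is writing the two's complement of the remaining event count into the hardware: a 32-bit counter loaded with -left overflows, and raises the PMU interrupt, after exactly `left` increments. A self-contained sketch of that arithmetic:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t left = 1000;			/* events until next sample */
	uint32_t counter = (uint32_t)-left;	/* 0xfffffc18 */
	uint32_t ticks = 0;

	while (counter != 0) {			/* overflow wraps to zero */
		counter++;
		ticks++;
	}
	printf("overflow after %u events\n", ticks);	/* prints 1000 */
	return 0;
}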
160 | |||
161 | static u64 | ||
162 | armpmu_event_update(struct perf_event *event, | ||
163 | struct hw_perf_event *hwc, | ||
164 | int idx) | ||
165 | { | ||
166 | int shift = 64 - 32; | ||
167 | s64 prev_raw_count, new_raw_count; | ||
168 | s64 delta; | ||
169 | |||
170 | again: | ||
171 | prev_raw_count = atomic64_read(&hwc->prev_count); | ||
172 | new_raw_count = armpmu->read_counter(idx); | ||
173 | |||
174 | if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count, | ||
175 | new_raw_count) != prev_raw_count) | ||
176 | goto again; | ||
177 | |||
178 | delta = (new_raw_count << shift) - (prev_raw_count << shift); | ||
179 | delta >>= shift; | ||
180 | |||
181 | atomic64_add(delta, &event->count); | ||
182 | atomic64_sub(delta, &hwc->period_left); | ||
183 | |||
184 | return new_raw_count; | ||
185 | } | ||
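The shift-by-(64 - 32) in armpmu_event_update() sign-extends the 32-bit counter difference to 64 bits, so the delta comes out right even when the hardware counter wrapped between reads. A stand-alone illustration (the shifts go through unsigned arithmetic here to avoid the signed-overflow pitfalls that the kernel's s64 version tolerates in practice):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int shift = 64 - 32;
	uint64_t prev = 0xfffffff0;	/* counter just before wrapping */
	uint64_t new  = 0x00000010;	/* counter after wrapping       */

	/* (new << 32) - (prev << 32), then arithmetic shift back down */
	int64_t delta = (int64_t)((new << shift) - (prev << shift)) >> shift;

	printf("delta = %lld\n", (long long)delta);	/* prints 32 */
	return 0;
}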
186 | |||
187 | static void | ||
188 | armpmu_disable(struct perf_event *event) | ||
189 | { | ||
190 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
191 | struct hw_perf_event *hwc = &event->hw; | ||
192 | int idx = hwc->idx; | ||
193 | |||
194 | WARN_ON(idx < 0); | ||
195 | |||
196 | clear_bit(idx, cpuc->active_mask); | ||
197 | armpmu->disable(hwc, idx); | ||
198 | |||
199 | barrier(); | ||
200 | |||
201 | armpmu_event_update(event, hwc, idx); | ||
202 | cpuc->events[idx] = NULL; | ||
203 | clear_bit(idx, cpuc->used_mask); | ||
204 | |||
205 | perf_event_update_userpage(event); | ||
206 | } | ||
207 | |||
208 | static void | ||
209 | armpmu_read(struct perf_event *event) | ||
210 | { | ||
211 | struct hw_perf_event *hwc = &event->hw; | ||
212 | |||
213 | /* Don't read disabled counters! */ | ||
214 | if (hwc->idx < 0) | ||
215 | return; | ||
216 | |||
217 | armpmu_event_update(event, hwc, hwc->idx); | ||
218 | } | ||
219 | |||
220 | static void | ||
221 | armpmu_unthrottle(struct perf_event *event) | ||
222 | { | ||
223 | struct hw_perf_event *hwc = &event->hw; | ||
224 | |||
225 | /* | ||
226 | * Set the period again. Some counters can't be stopped, so when we | ||
227 | * were throttled we simply disabled the IRQ source and the counter | ||
228 | * may have been left counting. If we don't do this step then we may | ||
229 | * get an interrupt too soon or *way* too late if the overflow has | ||
230 | * happened since disabling. | ||
231 | */ | ||
232 | armpmu_event_set_period(event, hwc, hwc->idx); | ||
233 | armpmu->enable(hwc, hwc->idx); | ||
234 | } | ||
235 | |||
236 | static int | ||
237 | armpmu_enable(struct perf_event *event) | ||
238 | { | ||
239 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
240 | struct hw_perf_event *hwc = &event->hw; | ||
241 | int idx; | ||
242 | int err = 0; | ||
243 | |||
244 | /* If we don't have space for the counter, finish early. */ | ||
245 | idx = armpmu->get_event_idx(cpuc, hwc); | ||
246 | if (idx < 0) { | ||
247 | err = idx; | ||
248 | goto out; | ||
249 | } | ||
250 | |||
251 | /* | ||
252 | * If there is an event in the counter we are going to use then make | ||
253 | * sure it is disabled. | ||
254 | */ | ||
255 | event->hw.idx = idx; | ||
256 | armpmu->disable(hwc, idx); | ||
257 | cpuc->events[idx] = event; | ||
258 | set_bit(idx, cpuc->active_mask); | ||
259 | |||
260 | /* Set the period for the event. */ | ||
261 | armpmu_event_set_period(event, hwc, idx); | ||
262 | |||
263 | /* Enable the event. */ | ||
264 | armpmu->enable(hwc, idx); | ||
265 | |||
266 | /* Propagate our changes to the userspace mapping. */ | ||
267 | perf_event_update_userpage(event); | ||
268 | |||
269 | out: | ||
270 | return err; | ||
271 | } | ||
272 | |||
273 | static struct pmu pmu = { | ||
274 | .enable = armpmu_enable, | ||
275 | .disable = armpmu_disable, | ||
276 | .unthrottle = armpmu_unthrottle, | ||
277 | .read = armpmu_read, | ||
278 | }; | ||
279 | |||
280 | static int | ||
281 | validate_event(struct cpu_hw_events *cpuc, | ||
282 | struct perf_event *event) | ||
283 | { | ||
284 | struct hw_perf_event fake_event = event->hw; | ||
285 | |||
286 | if (event->pmu && event->pmu != &pmu) | ||
287 | return 0; | ||
288 | |||
289 | return armpmu->get_event_idx(cpuc, &fake_event) >= 0; | ||
290 | } | ||
291 | |||
292 | static int | ||
293 | validate_group(struct perf_event *event) | ||
294 | { | ||
295 | struct perf_event *sibling, *leader = event->group_leader; | ||
296 | struct cpu_hw_events fake_pmu; | ||
297 | |||
298 | memset(&fake_pmu, 0, sizeof(fake_pmu)); | ||
299 | |||
300 | if (!validate_event(&fake_pmu, leader)) | ||
301 | return -ENOSPC; | ||
302 | |||
303 | list_for_each_entry(sibling, &leader->sibling_list, group_entry) { | ||
304 | if (!validate_event(&fake_pmu, sibling)) | ||
305 | return -ENOSPC; | ||
306 | } | ||
307 | |||
308 | if (!validate_event(&fake_pmu, event)) | ||
309 | return -ENOSPC; | ||
310 | |||
311 | return 0; | ||
312 | } | ||
313 | |||
314 | static int | ||
315 | armpmu_reserve_hardware(void) | ||
316 | { | ||
317 | int i; | ||
318 | int err; | ||
319 | |||
320 | pmu_irqs = reserve_pmu(); | ||
321 | if (IS_ERR(pmu_irqs)) { | ||
322 | pr_warning("unable to reserve pmu\n"); | ||
323 | return PTR_ERR(pmu_irqs); | ||
324 | } | ||
325 | |||
326 | init_pmu(); | ||
327 | |||
328 | if (pmu_irqs->num_irqs < 1) { | ||
329 | pr_err("no irqs for PMUs defined\n"); | ||
330 | return -ENODEV; | ||
331 | } | ||
332 | |||
333 | for (i = 0; i < pmu_irqs->num_irqs; ++i) { | ||
334 | err = request_irq(pmu_irqs->irqs[i], armpmu->handle_irq, | ||
335 | IRQF_DISABLED | IRQF_NOBALANCING, | ||
336 | "armpmu", NULL); | ||
337 | if (err) { | ||
338 | pr_warning("unable to request IRQ%d for ARM " | ||
339 | "perf counters\n", pmu_irqs->irqs[i]); | ||
340 | break; | ||
341 | } | ||
342 | } | ||
343 | |||
344 | if (err) { | ||
345 | for (i = i - 1; i >= 0; --i) | ||
346 | free_irq(pmu_irqs->irqs[i], NULL); | ||
347 | release_pmu(pmu_irqs); | ||
348 | pmu_irqs = NULL; | ||
349 | } | ||
350 | |||
351 | return err; | ||
352 | } | ||
353 | |||
354 | static void | ||
355 | armpmu_release_hardware(void) | ||
356 | { | ||
357 | int i; | ||
358 | |||
359 | for (i = pmu_irqs->num_irqs - 1; i >= 0; --i) | ||
360 | free_irq(pmu_irqs->irqs[i], NULL); | ||
361 | armpmu->stop(); | ||
362 | |||
363 | release_pmu(pmu_irqs); | ||
364 | pmu_irqs = NULL; | ||
365 | } | ||
366 | |||
367 | static atomic_t active_events = ATOMIC_INIT(0); | ||
368 | static DEFINE_MUTEX(pmu_reserve_mutex); | ||
369 | |||
370 | static void | ||
371 | hw_perf_event_destroy(struct perf_event *event) | ||
372 | { | ||
373 | if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) { | ||
374 | armpmu_release_hardware(); | ||
375 | mutex_unlock(&pmu_reserve_mutex); | ||
376 | } | ||
377 | } | ||
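This pairs with the reservation logic in hw_perf_event_init() below: the first active event claims the PMU interrupts, the last one releases them, and atomic_dec_and_mutex_lock() makes the final decrement and the teardown atomic with respect to newcomers. A simplified user-space analogue of the pattern (pthreads; unlike the kernel code, this sketch takes the mutex on every path rather than only on the 0 <-> 1 transitions):

#include <pthread.h>

static int active;	/* guarded by lock */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void get_hw(void) { /* stands in for armpmu_reserve_hardware() */ }
static void put_hw(void) { /* stands in for armpmu_release_hardware() */ }

void event_init(void)
{
	pthread_mutex_lock(&lock);
	if (active++ == 0)
		get_hw();	/* first user reserves the PMU */
	pthread_mutex_unlock(&lock);
}

void event_destroy(void)
{
	pthread_mutex_lock(&lock);
	if (--active == 0)
		put_hw();	/* last user releases it */
	pthread_mutex_unlock(&lock);
}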
378 | |||
379 | static int | ||
380 | __hw_perf_event_init(struct perf_event *event) | ||
381 | { | ||
382 | struct hw_perf_event *hwc = &event->hw; | ||
383 | int mapping, err; | ||
384 | |||
385 | /* Decode the generic type into an ARM event identifier. */ | ||
386 | if (PERF_TYPE_HARDWARE == event->attr.type) { | ||
387 | mapping = armpmu->event_map(event->attr.config); | ||
388 | } else if (PERF_TYPE_HW_CACHE == event->attr.type) { | ||
389 | mapping = armpmu_map_cache_event(event->attr.config); | ||
390 | } else if (PERF_TYPE_RAW == event->attr.type) { | ||
391 | mapping = armpmu->raw_event(event->attr.config); | ||
392 | } else { | ||
393 | pr_debug("event type %x not supported\n", event->attr.type); | ||
394 | return -EOPNOTSUPP; | ||
395 | } | ||
396 | |||
397 | if (mapping < 0) { | ||
398 | pr_debug("event %x:%llx not supported\n", event->attr.type, | ||
399 | event->attr.config); | ||
400 | return mapping; | ||
401 | } | ||
402 | |||
403 | /* | ||
404 | * Check whether we need to exclude the counter from certain modes. | ||
405 | * The ARM performance counters are on all of the time so if someone | ||
406 | * has asked us for some excludes then we have to fail. | ||
407 | */ | ||
408 | if (event->attr.exclude_kernel || event->attr.exclude_user || | ||
409 | event->attr.exclude_hv || event->attr.exclude_idle) { | ||
410 | pr_debug("ARM performance counters do not support " | ||
411 | "mode exclusion\n"); | ||
412 | return -EPERM; | ||
413 | } | ||
414 | |||
415 | /* | ||
416 | * We don't assign an index until we actually place the event onto | ||
417 | * hardware. Use -1 to signify that we haven't decided where to put it | ||
418 | * yet. For SMP systems, each core has its own PMU, so we can't do any | ||
419 | * clever allocation or constraints checking at this point. | ||
420 | */ | ||
421 | hwc->idx = -1; | ||
422 | |||
423 | /* | ||
424 | * Store the event encoding into the config_base field. config and | ||
425 | * event_base are unused as the only 2 things we need to know are | ||
426 | * the event mapping and the counter to use. The counter to use is | ||
427 | * also the idx, and the config_base is the event type. | ||
428 | */ | ||
429 | hwc->config_base = (unsigned long)mapping; | ||
430 | hwc->config = 0; | ||
431 | hwc->event_base = 0; | ||
432 | |||
433 | if (!hwc->sample_period) { | ||
434 | hwc->sample_period = armpmu->max_period; | ||
435 | hwc->last_period = hwc->sample_period; | ||
436 | atomic64_set(&hwc->period_left, hwc->sample_period); | ||
437 | } | ||
438 | |||
439 | err = 0; | ||
440 | if (event->group_leader != event) { | ||
441 | err = validate_group(event); | ||
442 | if (err) | ||
443 | return -EINVAL; | ||
444 | } | ||
445 | |||
446 | return err; | ||
447 | } | ||
448 | |||
449 | const struct pmu * | ||
450 | hw_perf_event_init(struct perf_event *event) | ||
451 | { | ||
452 | int err = 0; | ||
453 | |||
454 | if (!armpmu) | ||
455 | return ERR_PTR(-ENODEV); | ||
456 | |||
457 | event->destroy = hw_perf_event_destroy; | ||
458 | |||
459 | if (!atomic_inc_not_zero(&active_events)) { | ||
460 | if (atomic_read(&active_events) > perf_max_events) { | ||
461 | atomic_dec(&active_events); | ||
462 | return ERR_PTR(-ENOSPC); | ||
463 | } | ||
464 | |||
465 | mutex_lock(&pmu_reserve_mutex); | ||
466 | if (atomic_read(&active_events) == 0) { | ||
467 | err = armpmu_reserve_hardware(); | ||
468 | } | ||
469 | |||
470 | if (!err) | ||
471 | atomic_inc(&active_events); | ||
472 | mutex_unlock(&pmu_reserve_mutex); | ||
473 | } | ||
474 | |||
475 | if (err) | ||
476 | return ERR_PTR(err); | ||
477 | |||
478 | err = __hw_perf_event_init(event); | ||
479 | if (err) | ||
480 | hw_perf_event_destroy(event); | ||
481 | |||
482 | return err ? ERR_PTR(err) : &pmu; | ||
483 | } | ||
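Everything above is ultimately driven by the perf_event_open(2) syscall. A hedged user-space sketch of counting CPU cycles through this PMU; the syscall has no libc wrapper, hence the raw syscall(), and error handling is kept minimal:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count = 0;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.disabled = 1;

	/* measure this task (pid 0) on any CPU, no group, no flags */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("cycles: %lld\n", count);
	close(fd);
	return 0;
}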
484 | |||
485 | void | ||
486 | hw_perf_enable(void) | ||
487 | { | ||
488 | /* Enable all of the perf events on hardware. */ | ||
489 | int idx; | ||
490 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
491 | |||
492 | if (!armpmu) | ||
493 | return; | ||
494 | |||
495 | for (idx = 0; idx <= armpmu->num_events; ++idx) { | ||
496 | struct perf_event *event = cpuc->events[idx]; | ||
497 | |||
498 | if (!event) | ||
499 | continue; | ||
500 | |||
501 | armpmu->enable(&event->hw, idx); | ||
502 | } | ||
503 | |||
504 | armpmu->start(); | ||
505 | } | ||
506 | |||
507 | void | ||
508 | hw_perf_disable(void) | ||
509 | { | ||
510 | if (armpmu) | ||
511 | armpmu->stop(); | ||
512 | } | ||
513 | |||
514 | /* | ||
515 | * ARMv6 Performance counter handling code. | ||
516 | * | ||
517 | * ARMv6 has 2 configurable performance counters and a single cycle counter. | ||
518 | * They all share a single reset bit but can be written to zero so we can use | ||
519 | * that for a reset. | ||
520 | * | ||
521 | * The counters can't be individually enabled or disabled so when we remove | ||
522 | * one event and replace it with another we could get spurious counts from the | ||
523 | * wrong event. However, we can take advantage of the fact that the | ||
524 | * counters can be pointed at the ETMEXTOUT event-bus signals, and provided | ||
525 | * we *don't* export events to the event bus, those signals never assert | ||
526 | * and the counter freezes. The procedure for disabling a configurable counter is: | ||
527 | * - change the counter to count the ETMEXTOUT[0] signal (0x20). This | ||
528 | * effectively stops the counter from counting. | ||
529 | * - disable the counter's interrupt generation (each counter has its | ||
530 | * own interrupt enable bit). | ||
531 | * Once stopped, the counter value can be written as 0 to reset. | ||
532 | * | ||
533 | * To enable a counter: | ||
534 | * - enable the counter's interrupt generation. | ||
535 | * - set the new event type. | ||
536 | * | ||
537 | * Note: the dedicated cycle counter only counts cycles and can't be | ||
538 | * enabled/disabled independently of the others. When we want to disable the | ||
539 | * cycle counter, we have to just disable the interrupt reporting and start | ||
540 | * ignoring that counter. When re-enabling, we have to reset the value and | ||
541 | * enable the interrupt. | ||
542 | */ | ||
543 | |||
544 | enum armv6_perf_types { | ||
545 | ARMV6_PERFCTR_ICACHE_MISS = 0x0, | ||
546 | ARMV6_PERFCTR_IBUF_STALL = 0x1, | ||
547 | ARMV6_PERFCTR_DDEP_STALL = 0x2, | ||
548 | ARMV6_PERFCTR_ITLB_MISS = 0x3, | ||
549 | ARMV6_PERFCTR_DTLB_MISS = 0x4, | ||
550 | ARMV6_PERFCTR_BR_EXEC = 0x5, | ||
551 | ARMV6_PERFCTR_BR_MISPREDICT = 0x6, | ||
552 | ARMV6_PERFCTR_INSTR_EXEC = 0x7, | ||
553 | ARMV6_PERFCTR_DCACHE_HIT = 0x9, | ||
554 | ARMV6_PERFCTR_DCACHE_ACCESS = 0xA, | ||
555 | ARMV6_PERFCTR_DCACHE_MISS = 0xB, | ||
556 | ARMV6_PERFCTR_DCACHE_WBACK = 0xC, | ||
557 | ARMV6_PERFCTR_SW_PC_CHANGE = 0xD, | ||
558 | ARMV6_PERFCTR_MAIN_TLB_MISS = 0xF, | ||
559 | ARMV6_PERFCTR_EXPL_D_ACCESS = 0x10, | ||
560 | ARMV6_PERFCTR_LSU_FULL_STALL = 0x11, | ||
561 | ARMV6_PERFCTR_WBUF_DRAINED = 0x12, | ||
562 | ARMV6_PERFCTR_CPU_CYCLES = 0xFF, | ||
563 | ARMV6_PERFCTR_NOP = 0x20, | ||
564 | }; | ||
565 | |||
566 | enum armv6_counters { | ||
567 | ARMV6_CYCLE_COUNTER = 1, | ||
568 | ARMV6_COUNTER0, | ||
569 | ARMV6_COUNTER1, | ||
570 | }; | ||
571 | |||
572 | /* | ||
573 | * The hardware events that we support. We do support cache operations but | ||
574 | * we have Harvard caches and no way to combine instruction and data | ||
575 | * accesses/misses in hardware. | ||
576 | */ | ||
577 | static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = { | ||
578 | [PERF_COUNT_HW_CPU_CYCLES] = ARMV6_PERFCTR_CPU_CYCLES, | ||
579 | [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6_PERFCTR_INSTR_EXEC, | ||
580 | [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, | ||
581 | [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, | ||
582 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC, | ||
583 | [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6_PERFCTR_BR_MISPREDICT, | ||
584 | [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, | ||
585 | }; | ||
586 | |||
587 | static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | ||
588 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
589 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | ||
590 | [C(L1D)] = { | ||
591 | /* | ||
592 | * The performance counters don't differentiate between read | ||
593 | * and write accesses/misses so this isn't strictly correct, | ||
594 | * but it's the best we can do. Writes and reads get | ||
595 | * combined. | ||
596 | */ | ||
597 | [C(OP_READ)] = { | ||
598 | [C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS, | ||
599 | [C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS, | ||
600 | }, | ||
601 | [C(OP_WRITE)] = { | ||
602 | [C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS, | ||
603 | [C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS, | ||
604 | }, | ||
605 | [C(OP_PREFETCH)] = { | ||
606 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
607 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
608 | }, | ||
609 | }, | ||
610 | [C(L1I)] = { | ||
611 | [C(OP_READ)] = { | ||
612 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
613 | [C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS, | ||
614 | }, | ||
615 | [C(OP_WRITE)] = { | ||
616 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
617 | [C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS, | ||
618 | }, | ||
619 | [C(OP_PREFETCH)] = { | ||
620 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
621 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
622 | }, | ||
623 | }, | ||
624 | [C(LL)] = { | ||
625 | [C(OP_READ)] = { | ||
626 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
627 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
628 | }, | ||
629 | [C(OP_WRITE)] = { | ||
630 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
631 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
632 | }, | ||
633 | [C(OP_PREFETCH)] = { | ||
634 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
635 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
636 | }, | ||
637 | }, | ||
638 | [C(DTLB)] = { | ||
639 | /* | ||
640 | * The ARM performance counters can count micro DTLB misses, | ||
641 | * micro ITLB misses and main TLB misses. There isn't an event | ||
642 | * for TLB misses, so use the micro misses here and if users | ||
643 | * want the main TLB misses they can use a raw counter. | ||
644 | */ | ||
645 | [C(OP_READ)] = { | ||
646 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
647 | [C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS, | ||
648 | }, | ||
649 | [C(OP_WRITE)] = { | ||
650 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
651 | [C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS, | ||
652 | }, | ||
653 | [C(OP_PREFETCH)] = { | ||
654 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
655 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
656 | }, | ||
657 | }, | ||
658 | [C(ITLB)] = { | ||
659 | [C(OP_READ)] = { | ||
660 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
661 | [C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS, | ||
662 | }, | ||
663 | [C(OP_WRITE)] = { | ||
664 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
665 | [C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS, | ||
666 | }, | ||
667 | [C(OP_PREFETCH)] = { | ||
668 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
669 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
670 | }, | ||
671 | }, | ||
672 | [C(BPU)] = { | ||
673 | [C(OP_READ)] = { | ||
674 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
675 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
676 | }, | ||
677 | [C(OP_WRITE)] = { | ||
678 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
679 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
680 | }, | ||
681 | [C(OP_PREFETCH)] = { | ||
682 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
683 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
684 | }, | ||
685 | }, | ||
686 | }; | ||
687 | |||
688 | enum armv6mpcore_perf_types { | ||
689 | ARMV6MPCORE_PERFCTR_ICACHE_MISS = 0x0, | ||
690 | ARMV6MPCORE_PERFCTR_IBUF_STALL = 0x1, | ||
691 | ARMV6MPCORE_PERFCTR_DDEP_STALL = 0x2, | ||
692 | ARMV6MPCORE_PERFCTR_ITLB_MISS = 0x3, | ||
693 | ARMV6MPCORE_PERFCTR_DTLB_MISS = 0x4, | ||
694 | ARMV6MPCORE_PERFCTR_BR_EXEC = 0x5, | ||
695 | ARMV6MPCORE_PERFCTR_BR_NOTPREDICT = 0x6, | ||
696 | ARMV6MPCORE_PERFCTR_BR_MISPREDICT = 0x7, | ||
697 | ARMV6MPCORE_PERFCTR_INSTR_EXEC = 0x8, | ||
698 | ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS = 0xA, | ||
699 | ARMV6MPCORE_PERFCTR_DCACHE_RDMISS = 0xB, | ||
700 | ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS = 0xC, | ||
701 | ARMV6MPCORE_PERFCTR_DCACHE_WRMISS = 0xD, | ||
702 | ARMV6MPCORE_PERFCTR_DCACHE_EVICTION = 0xE, | ||
703 | ARMV6MPCORE_PERFCTR_SW_PC_CHANGE = 0xF, | ||
704 | ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS = 0x10, | ||
705 | ARMV6MPCORE_PERFCTR_EXPL_MEM_ACCESS = 0x11, | ||
706 | ARMV6MPCORE_PERFCTR_LSU_FULL_STALL = 0x12, | ||
707 | ARMV6MPCORE_PERFCTR_WBUF_DRAINED = 0x13, | ||
708 | ARMV6MPCORE_PERFCTR_CPU_CYCLES = 0xFF, | ||
709 | }; | ||
710 | |||
711 | /* | ||
712 | * The hardware events that we support. We do support cache operations but | ||
713 | * we have Harvard caches and no way to combine instruction and data | ||
714 | * accesses/misses in hardware. | ||
715 | */ | ||
716 | static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = { | ||
717 | [PERF_COUNT_HW_CPU_CYCLES] = ARMV6MPCORE_PERFCTR_CPU_CYCLES, | ||
718 | [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_INSTR_EXEC, | ||
719 | [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, | ||
720 | [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, | ||
721 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC, | ||
722 | [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6MPCORE_PERFCTR_BR_MISPREDICT, | ||
723 | [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, | ||
724 | }; | ||
725 | |||
726 | static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | ||
727 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
728 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | ||
729 | [C(L1D)] = { | ||
730 | [C(OP_READ)] = { | ||
731 | [C(RESULT_ACCESS)] = | ||
732 | ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS, | ||
733 | [C(RESULT_MISS)] = | ||
734 | ARMV6MPCORE_PERFCTR_DCACHE_RDMISS, | ||
735 | }, | ||
736 | [C(OP_WRITE)] = { | ||
737 | [C(RESULT_ACCESS)] = | ||
738 | ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS, | ||
739 | [C(RESULT_MISS)] = | ||
740 | ARMV6MPCORE_PERFCTR_DCACHE_WRMISS, | ||
741 | }, | ||
742 | [C(OP_PREFETCH)] = { | ||
743 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
744 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
745 | }, | ||
746 | }, | ||
747 | [C(L1I)] = { | ||
748 | [C(OP_READ)] = { | ||
749 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
750 | [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS, | ||
751 | }, | ||
752 | [C(OP_WRITE)] = { | ||
753 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
754 | [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS, | ||
755 | }, | ||
756 | [C(OP_PREFETCH)] = { | ||
757 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
758 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
759 | }, | ||
760 | }, | ||
761 | [C(LL)] = { | ||
762 | [C(OP_READ)] = { | ||
763 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
764 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
765 | }, | ||
766 | [C(OP_WRITE)] = { | ||
767 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
768 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
769 | }, | ||
770 | [C(OP_PREFETCH)] = { | ||
771 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
772 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
773 | }, | ||
774 | }, | ||
775 | [C(DTLB)] = { | ||
776 | /* | ||
777 | * The ARM performance counters can count micro DTLB misses, | ||
778 | * micro ITLB misses and main TLB misses. There isn't an event | ||
779 | * for TLB misses, so use the micro misses here and if users | ||
780 | * want the main TLB misses they can use a raw counter. | ||
781 | */ | ||
782 | [C(OP_READ)] = { | ||
783 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
784 | [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS, | ||
785 | }, | ||
786 | [C(OP_WRITE)] = { | ||
787 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
788 | [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS, | ||
789 | }, | ||
790 | [C(OP_PREFETCH)] = { | ||
791 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
792 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
793 | }, | ||
794 | }, | ||
795 | [C(ITLB)] = { | ||
796 | [C(OP_READ)] = { | ||
797 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
798 | [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS, | ||
799 | }, | ||
800 | [C(OP_WRITE)] = { | ||
801 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
802 | [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS, | ||
803 | }, | ||
804 | [C(OP_PREFETCH)] = { | ||
805 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
806 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
807 | }, | ||
808 | }, | ||
809 | [C(BPU)] = { | ||
810 | [C(OP_READ)] = { | ||
811 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
812 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
813 | }, | ||
814 | [C(OP_WRITE)] = { | ||
815 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
816 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
817 | }, | ||
818 | [C(OP_PREFETCH)] = { | ||
819 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
820 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
821 | }, | ||
822 | }, | ||
823 | }; | ||
824 | |||
825 | static inline unsigned long | ||
826 | armv6_pmcr_read(void) | ||
827 | { | ||
828 | u32 val; | ||
829 | asm volatile("mrc p15, 0, %0, c15, c12, 0" : "=r"(val)); | ||
830 | return val; | ||
831 | } | ||
832 | |||
833 | static inline void | ||
834 | armv6_pmcr_write(unsigned long val) | ||
835 | { | ||
836 | asm volatile("mcr p15, 0, %0, c15, c12, 0" : : "r"(val)); | ||
837 | } | ||
838 | |||
839 | #define ARMV6_PMCR_ENABLE (1 << 0) | ||
840 | #define ARMV6_PMCR_CTR01_RESET (1 << 1) | ||
841 | #define ARMV6_PMCR_CCOUNT_RESET (1 << 2) | ||
842 | #define ARMV6_PMCR_CCOUNT_DIV (1 << 3) | ||
843 | #define ARMV6_PMCR_COUNT0_IEN (1 << 4) | ||
844 | #define ARMV6_PMCR_COUNT1_IEN (1 << 5) | ||
845 | #define ARMV6_PMCR_CCOUNT_IEN (1 << 6) | ||
846 | #define ARMV6_PMCR_COUNT0_OVERFLOW (1 << 8) | ||
847 | #define ARMV6_PMCR_COUNT1_OVERFLOW (1 << 9) | ||
848 | #define ARMV6_PMCR_CCOUNT_OVERFLOW (1 << 10) | ||
849 | #define ARMV6_PMCR_EVT_COUNT0_SHIFT 20 | ||
850 | #define ARMV6_PMCR_EVT_COUNT0_MASK (0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT) | ||
851 | #define ARMV6_PMCR_EVT_COUNT1_SHIFT 12 | ||
852 | #define ARMV6_PMCR_EVT_COUNT1_MASK (0xFF << ARMV6_PMCR_EVT_COUNT1_SHIFT) | ||
853 | |||
854 | #define ARMV6_PMCR_OVERFLOWED_MASK \ | ||
855 | (ARMV6_PMCR_COUNT0_OVERFLOW | ARMV6_PMCR_COUNT1_OVERFLOW | \ | ||
856 | ARMV6_PMCR_CCOUNT_OVERFLOW) | ||
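As a worked example of the PMCR layout these masks describe: enabling the PMU with counter0 counting event 0x5 (branch executed, per the armv6_perf_types table) and its overflow interrupt on composes as below. The defines are duplicated so the snippet stands alone:

#include <stdio.h>

#define ARMV6_PMCR_ENABLE           (1 << 0)
#define ARMV6_PMCR_COUNT0_IEN       (1 << 4)
#define ARMV6_PMCR_EVT_COUNT0_SHIFT 20

int main(void)
{
	/* Count branch-executed (0x5) on counter0, IRQ on overflow. */
	unsigned long val = ARMV6_PMCR_ENABLE | ARMV6_PMCR_COUNT0_IEN |
			    (0x5UL << ARMV6_PMCR_EVT_COUNT0_SHIFT);

	printf("PMCR = 0x%08lx\n", val);	/* prints 0x00500011 */
	return 0;
}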
857 | |||
858 | static inline int | ||
859 | armv6_pmcr_has_overflowed(unsigned long pmcr) | ||
860 | { | ||
861 | return (pmcr & ARMV6_PMCR_OVERFLOWED_MASK); | ||
862 | } | ||
863 | |||
864 | static inline int | ||
865 | armv6_pmcr_counter_has_overflowed(unsigned long pmcr, | ||
866 | enum armv6_counters counter) | ||
867 | { | ||
868 | int ret = 0; | ||
869 | |||
870 | if (ARMV6_CYCLE_COUNTER == counter) | ||
871 | ret = pmcr & ARMV6_PMCR_CCOUNT_OVERFLOW; | ||
872 | else if (ARMV6_COUNTER0 == counter) | ||
873 | ret = pmcr & ARMV6_PMCR_COUNT0_OVERFLOW; | ||
874 | else if (ARMV6_COUNTER1 == counter) | ||
875 | ret = pmcr & ARMV6_PMCR_COUNT1_OVERFLOW; | ||
876 | else | ||
877 | WARN_ONCE(1, "invalid counter number (%d)\n", counter); | ||
878 | |||
879 | return ret; | ||
880 | } | ||
881 | |||
882 | static inline u32 | ||
883 | armv6pmu_read_counter(int counter) | ||
884 | { | ||
885 | unsigned long value = 0; | ||
886 | |||
887 | if (ARMV6_CYCLE_COUNTER == counter) | ||
888 | asm volatile("mrc p15, 0, %0, c15, c12, 1" : "=r"(value)); | ||
889 | else if (ARMV6_COUNTER0 == counter) | ||
890 | asm volatile("mrc p15, 0, %0, c15, c12, 2" : "=r"(value)); | ||
891 | else if (ARMV6_COUNTER1 == counter) | ||
892 | asm volatile("mrc p15, 0, %0, c15, c12, 3" : "=r"(value)); | ||
893 | else | ||
894 | WARN_ONCE(1, "invalid counter number (%d)\n", counter); | ||
895 | |||
896 | return value; | ||
897 | } | ||
898 | |||
899 | static inline void | ||
900 | armv6pmu_write_counter(int counter, | ||
901 | u32 value) | ||
902 | { | ||
903 | if (ARMV6_CYCLE_COUNTER == counter) | ||
904 | asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r"(value)); | ||
905 | else if (ARMV6_COUNTER0 == counter) | ||
906 | asm volatile("mcr p15, 0, %0, c15, c12, 2" : : "r"(value)); | ||
907 | else if (ARMV6_COUNTER1 == counter) | ||
908 | asm volatile("mcr p15, 0, %0, c15, c12, 3" : : "r"(value)); | ||
909 | else | ||
910 | WARN_ONCE(1, "invalid counter number (%d)\n", counter); | ||
911 | } | ||
912 | |||
913 | void | ||
914 | armv6pmu_enable_event(struct hw_perf_event *hwc, | ||
915 | int idx) | ||
916 | { | ||
917 | unsigned long val, mask, evt, flags; | ||
918 | |||
919 | if (ARMV6_CYCLE_COUNTER == idx) { | ||
920 | mask = 0; | ||
921 | evt = ARMV6_PMCR_CCOUNT_IEN; | ||
922 | } else if (ARMV6_COUNTER0 == idx) { | ||
923 | mask = ARMV6_PMCR_EVT_COUNT0_MASK; | ||
924 | evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT0_SHIFT) | | ||
925 | ARMV6_PMCR_COUNT0_IEN; | ||
926 | } else if (ARMV6_COUNTER1 == idx) { | ||
927 | mask = ARMV6_PMCR_EVT_COUNT1_MASK; | ||
928 | evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT1_SHIFT) | | ||
929 | ARMV6_PMCR_COUNT1_IEN; | ||
930 | } else { | ||
931 | WARN_ONCE(1, "invalid counter number (%d)\n", idx); | ||
932 | return; | ||
933 | } | ||
934 | |||
935 | /* | ||
936 | * Mask out the current event and set the counter to count the event | ||
937 | * that we're interested in. | ||
938 | */ | ||
939 | spin_lock_irqsave(&pmu_lock, flags); | ||
940 | val = armv6_pmcr_read(); | ||
941 | val &= ~mask; | ||
942 | val |= evt; | ||
943 | armv6_pmcr_write(val); | ||
944 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
945 | } | ||
946 | |||
947 | static irqreturn_t | ||
948 | armv6pmu_handle_irq(int irq_num, | ||
949 | void *dev) | ||
950 | { | ||
951 | unsigned long pmcr = armv6_pmcr_read(); | ||
952 | struct perf_sample_data data; | ||
953 | struct cpu_hw_events *cpuc; | ||
954 | struct pt_regs *regs; | ||
955 | int idx; | ||
956 | |||
957 | if (!armv6_pmcr_has_overflowed(pmcr)) | ||
958 | return IRQ_NONE; | ||
959 | |||
960 | regs = get_irq_regs(); | ||
961 | |||
962 | /* | ||
963 | * The interrupts are cleared by writing the overflow flags back to | ||
964 | * the control register. The other bits have no effect when rewritten, | ||
965 | * so write the whole value back. | ||
966 | */ | ||
967 | armv6_pmcr_write(pmcr); | ||
968 | |||
969 | perf_sample_data_init(&data, 0); | ||
970 | |||
971 | cpuc = &__get_cpu_var(cpu_hw_events); | ||
972 | for (idx = 0; idx <= armpmu->num_events; ++idx) { | ||
973 | struct perf_event *event = cpuc->events[idx]; | ||
974 | struct hw_perf_event *hwc; | ||
975 | |||
976 | if (!test_bit(idx, cpuc->active_mask)) | ||
977 | continue; | ||
978 | |||
979 | /* | ||
980 | * We have a single interrupt for all counters. Check that | ||
981 | * each counter has overflowed before we process it. | ||
982 | */ | ||
983 | if (!armv6_pmcr_counter_has_overflowed(pmcr, idx)) | ||
984 | continue; | ||
985 | |||
986 | hwc = &event->hw; | ||
987 | armpmu_event_update(event, hwc, idx); | ||
988 | data.period = event->hw.last_period; | ||
989 | if (!armpmu_event_set_period(event, hwc, idx)) | ||
990 | continue; | ||
991 | |||
992 | if (perf_event_overflow(event, 0, &data, regs)) | ||
993 | armpmu->disable(hwc, idx); | ||
994 | } | ||
995 | |||
996 | /* | ||
997 | * Handle the pending perf events. | ||
998 | * | ||
999 | * Note: this call *must* be run with interrupts enabled. For | ||
1000 | * platforms that can have the PMU interrupts raised as a PMI, this | ||
1001 | * will not work. | ||
1002 | */ | ||
1003 | perf_event_do_pending(); | ||
1004 | |||
1005 | return IRQ_HANDLED; | ||
1006 | } | ||
1007 | |||
1008 | static void | ||
1009 | armv6pmu_start(void) | ||
1010 | { | ||
1011 | unsigned long flags, val; | ||
1012 | |||
1013 | spin_lock_irqsave(&pmu_lock, flags); | ||
1014 | val = armv6_pmcr_read(); | ||
1015 | val |= ARMV6_PMCR_ENABLE; | ||
1016 | armv6_pmcr_write(val); | ||
1017 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
1018 | } | ||
1019 | |||
1020 | void | ||
1021 | armv6pmu_stop(void) | ||
1022 | { | ||
1023 | unsigned long flags, val; | ||
1024 | |||
1025 | spin_lock_irqsave(&pmu_lock, flags); | ||
1026 | val = armv6_pmcr_read(); | ||
1027 | val &= ~ARMV6_PMCR_ENABLE; | ||
1028 | armv6_pmcr_write(val); | ||
1029 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
1030 | } | ||
1031 | |||
1032 | static inline int | ||
1033 | armv6pmu_event_map(int config) | ||
1034 | { | ||
1035 | int mapping = armv6_perf_map[config]; | ||
1036 | if (HW_OP_UNSUPPORTED == mapping) | ||
1037 | mapping = -EOPNOTSUPP; | ||
1038 | return mapping; | ||
1039 | } | ||
1040 | |||
1041 | static inline int | ||
1042 | armv6mpcore_pmu_event_map(int config) | ||
1043 | { | ||
1044 | int mapping = armv6mpcore_perf_map[config]; | ||
1045 | if (HW_OP_UNSUPPORTED == mapping) | ||
1046 | mapping = -EOPNOTSUPP; | ||
1047 | return mapping; | ||
1048 | } | ||
1049 | |||
1050 | static u64 | ||
1051 | armv6pmu_raw_event(u64 config) | ||
1052 | { | ||
1053 | return config & 0xff; | ||
1054 | } | ||
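Because only the low byte survives, raw configs map directly onto the ARMv6 event numbers enumerated earlier. A sketch of filling in a perf_event_attr for one such event (the helper name is illustrative; the attr would then be handed to perf_event_open() as in the earlier example):

#include <string.h>
#include <linux/perf_event.h>

/* Prepare an attr for a raw ARMv6 event. */
static void raw_lsu_stall_attr(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->type   = PERF_TYPE_RAW;
	attr->size   = sizeof(*attr);
	attr->config = 0x11;	/* ARMV6_PERFCTR_LSU_FULL_STALL; low byte only */
}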
1055 | |||
1056 | static int | ||
1057 | armv6pmu_get_event_idx(struct cpu_hw_events *cpuc, | ||
1058 | struct hw_perf_event *event) | ||
1059 | { | ||
1060 | /* Always place a cycle-counter event into the dedicated cycle counter. */ | ||
1061 | if (ARMV6_PERFCTR_CPU_CYCLES == event->config_base) { | ||
1062 | if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask)) | ||
1063 | return -EAGAIN; | ||
1064 | |||
1065 | return ARMV6_CYCLE_COUNTER; | ||
1066 | } else { | ||
1067 | /* | ||
1068 | * For anything other than a cycle counter, try to use | ||
1069 | * counter0 and counter1. | ||
1070 | */ | ||
1071 | if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask)) { | ||
1072 | return ARMV6_COUNTER1; | ||
1073 | } | ||
1074 | |||
1075 | if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask)) { | ||
1076 | return ARMV6_COUNTER0; | ||
1077 | } | ||
1078 | |||
1079 | /* The counters are all in use. */ | ||
1080 | return -EAGAIN; | ||
1081 | } | ||
1082 | } | ||
1083 | |||
1084 | static void | ||
1085 | armv6pmu_disable_event(struct hw_perf_event *hwc, | ||
1086 | int idx) | ||
1087 | { | ||
1088 | unsigned long val, mask, evt, flags; | ||
1089 | |||
1090 | if (ARMV6_CYCLE_COUNTER == idx) { | ||
1091 | mask = ARMV6_PMCR_CCOUNT_IEN; | ||
1092 | evt = 0; | ||
1093 | } else if (ARMV6_COUNTER0 == idx) { | ||
1094 | mask = ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK; | ||
1095 | evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT; | ||
1096 | } else if (ARMV6_COUNTER1 == idx) { | ||
1097 | mask = ARMV6_PMCR_COUNT1_IEN | ARMV6_PMCR_EVT_COUNT1_MASK; | ||
1098 | evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT1_SHIFT; | ||
1099 | } else { | ||
1100 | WARN_ONCE(1, "invalid counter number (%d)\n", idx); | ||
1101 | return; | ||
1102 | } | ||
1103 | |||
1104 | /* | ||
1105 | * Mask out the current event and set the counter to count the number | ||
1106 | * of ETM bus signal assertion cycles. The external reporting should | ||
1107 | * be disabled and so this should never increment. | ||
1108 | */ | ||
1109 | spin_lock_irqsave(&pmu_lock, flags); | ||
1110 | val = armv6_pmcr_read(); | ||
1111 | val &= ~mask; | ||
1112 | val |= evt; | ||
1113 | armv6_pmcr_write(val); | ||
1114 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
1115 | } | ||
1116 | |||
1117 | static void | ||
1118 | armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc, | ||
1119 | int idx) | ||
1120 | { | ||
1121 | unsigned long val, mask, flags, evt = 0; | ||
1122 | |||
1123 | if (ARMV6_CYCLE_COUNTER == idx) { | ||
1124 | mask = ARMV6_PMCR_CCOUNT_IEN; | ||
1125 | } else if (ARMV6_COUNTER0 == idx) { | ||
1126 | mask = ARMV6_PMCR_COUNT0_IEN; | ||
1127 | } else if (ARMV6_COUNTER1 == idx) { | ||
1128 | mask = ARMV6_PMCR_COUNT1_IEN; | ||
1129 | } else { | ||
1130 | WARN_ONCE(1, "invalid counter number (%d)\n", idx); | ||
1131 | return; | ||
1132 | } | ||
1133 | |||
1134 | /* | ||
1135 | * Unlike UP ARMv6, we don't have a way of stopping the counters. We | ||
1136 | * simply disable the interrupt reporting. | ||
1137 | */ | ||
1138 | spin_lock_irqsave(&pmu_lock, flags); | ||
1139 | val = armv6_pmcr_read(); | ||
1140 | val &= ~mask; | ||
1141 | val |= evt; | ||
1142 | armv6_pmcr_write(val); | ||
1143 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
1144 | } | ||
1145 | |||
1146 | static const struct arm_pmu armv6pmu = { | ||
1147 | .name = "v6", | ||
1148 | .handle_irq = armv6pmu_handle_irq, | ||
1149 | .enable = armv6pmu_enable_event, | ||
1150 | .disable = armv6pmu_disable_event, | ||
1151 | .event_map = armv6pmu_event_map, | ||
1152 | .raw_event = armv6pmu_raw_event, | ||
1153 | .read_counter = armv6pmu_read_counter, | ||
1154 | .write_counter = armv6pmu_write_counter, | ||
1155 | .get_event_idx = armv6pmu_get_event_idx, | ||
1156 | .start = armv6pmu_start, | ||
1157 | .stop = armv6pmu_stop, | ||
1158 | .num_events = 3, | ||
1159 | .max_period = (1LLU << 32) - 1, | ||
1160 | }; | ||
1161 | |||
1162 | /* | ||
1163 | * ARMv6mpcore is almost identical to single core ARMv6 with the exception | ||
1164 | * that some of the events have different enumerations and that there is no | ||
1165 | * *hack* to stop the programmable counters. To stop the counters we simply | ||
1166 | * disable the interrupt reporting and update the event. When unthrottling we | ||
1167 | * reset the period and enable the interrupt reporting. | ||
1168 | */ | ||
1169 | static const struct arm_pmu armv6mpcore_pmu = { | ||
1170 | .name = "v6mpcore", | ||
1171 | .handle_irq = armv6pmu_handle_irq, | ||
1172 | .enable = armv6pmu_enable_event, | ||
1173 | .disable = armv6mpcore_pmu_disable_event, | ||
1174 | .event_map = armv6mpcore_pmu_event_map, | ||
1175 | .raw_event = armv6pmu_raw_event, | ||
1176 | .read_counter = armv6pmu_read_counter, | ||
1177 | .write_counter = armv6pmu_write_counter, | ||
1178 | .get_event_idx = armv6pmu_get_event_idx, | ||
1179 | .start = armv6pmu_start, | ||
1180 | .stop = armv6pmu_stop, | ||
1181 | .num_events = 3, | ||
1182 | .max_period = (1LLU << 32) - 1, | ||
1183 | }; | ||
1184 | |||
1185 | /* | ||
1186 | * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code. | ||
1187 | * | ||
1188 | * Copied from ARMv6 code, with the low level code inspired | ||
1189 | * by the ARMv7 Oprofile code. | ||
1190 | * | ||
1191 | * Cortex-A8 has up to 4 configurable performance counters and | ||
1192 | * a single cycle counter. | ||
1193 | * Cortex-A9 has up to 31 configurable performance counters and | ||
1194 | * a single cycle counter. | ||
1195 | * | ||
1196 | * All counters can be enabled/disabled and IRQ masked separately. The cycle | ||
1197 | * counter and all 4 performance counters together can be reset separately. | ||
1198 | */ | ||
1199 | |||
1200 | #define ARMV7_PMU_CORTEX_A8_NAME "ARMv7 Cortex-A8" | ||
1201 | |||
1202 | #define ARMV7_PMU_CORTEX_A9_NAME "ARMv7 Cortex-A9" | ||
1203 | |||
1204 | /* Common ARMv7 event types */ | ||
1205 | enum armv7_perf_types { | ||
1206 | ARMV7_PERFCTR_PMNC_SW_INCR = 0x00, | ||
1207 | ARMV7_PERFCTR_IFETCH_MISS = 0x01, | ||
1208 | ARMV7_PERFCTR_ITLB_MISS = 0x02, | ||
1209 | ARMV7_PERFCTR_DCACHE_REFILL = 0x03, | ||
1210 | ARMV7_PERFCTR_DCACHE_ACCESS = 0x04, | ||
1211 | ARMV7_PERFCTR_DTLB_REFILL = 0x05, | ||
1212 | ARMV7_PERFCTR_DREAD = 0x06, | ||
1213 | ARMV7_PERFCTR_DWRITE = 0x07, | ||
1214 | |||
1215 | ARMV7_PERFCTR_EXC_TAKEN = 0x09, | ||
1216 | ARMV7_PERFCTR_EXC_EXECUTED = 0x0A, | ||
1217 | ARMV7_PERFCTR_CID_WRITE = 0x0B, | ||
1218 | /* ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS. | ||
1219 | * It counts: | ||
1220 | * - all branch instructions, | ||
1221 | * - instructions that explicitly write the PC, | ||
1222 | * - exception generating instructions. | ||
1223 | */ | ||
1224 | ARMV7_PERFCTR_PC_WRITE = 0x0C, | ||
1225 | ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D, | ||
1226 | ARMV7_PERFCTR_UNALIGNED_ACCESS = 0x0F, | ||
1227 | ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10, | ||
1228 | ARMV7_PERFCTR_CLOCK_CYCLES = 0x11, | ||
1229 | |||
1230 | ARMV7_PERFCTR_PC_BRANCH_MIS_USED = 0x12, | ||
1231 | |||
1232 | ARMV7_PERFCTR_CPU_CYCLES = 0xFF | ||
1233 | }; | ||
1234 | |||
1235 | /* ARMv7 Cortex-A8 specific event types */ | ||
1236 | enum armv7_a8_perf_types { | ||
1237 | ARMV7_PERFCTR_INSTR_EXECUTED = 0x08, | ||
1238 | |||
1239 | ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E, | ||
1240 | |||
1241 | ARMV7_PERFCTR_WRITE_BUFFER_FULL = 0x40, | ||
1242 | ARMV7_PERFCTR_L2_STORE_MERGED = 0x41, | ||
1243 | ARMV7_PERFCTR_L2_STORE_BUFF = 0x42, | ||
1244 | ARMV7_PERFCTR_L2_ACCESS = 0x43, | ||
1245 | ARMV7_PERFCTR_L2_CACH_MISS = 0x44, | ||
1246 | ARMV7_PERFCTR_AXI_READ_CYCLES = 0x45, | ||
1247 | ARMV7_PERFCTR_AXI_WRITE_CYCLES = 0x46, | ||
1248 | ARMV7_PERFCTR_MEMORY_REPLAY = 0x47, | ||
1249 | ARMV7_PERFCTR_UNALIGNED_ACCESS_REPLAY = 0x48, | ||
1250 | ARMV7_PERFCTR_L1_DATA_MISS = 0x49, | ||
1251 | ARMV7_PERFCTR_L1_INST_MISS = 0x4A, | ||
1252 | ARMV7_PERFCTR_L1_DATA_COLORING = 0x4B, | ||
1253 | ARMV7_PERFCTR_L1_NEON_DATA = 0x4C, | ||
1254 | ARMV7_PERFCTR_L1_NEON_CACH_DATA = 0x4D, | ||
1255 | ARMV7_PERFCTR_L2_NEON = 0x4E, | ||
1256 | ARMV7_PERFCTR_L2_NEON_HIT = 0x4F, | ||
1257 | ARMV7_PERFCTR_L1_INST = 0x50, | ||
1258 | ARMV7_PERFCTR_PC_RETURN_MIS_PRED = 0x51, | ||
1259 | ARMV7_PERFCTR_PC_BRANCH_FAILED = 0x52, | ||
1260 | ARMV7_PERFCTR_PC_BRANCH_TAKEN = 0x53, | ||
1261 | ARMV7_PERFCTR_PC_BRANCH_EXECUTED = 0x54, | ||
1262 | ARMV7_PERFCTR_OP_EXECUTED = 0x55, | ||
1263 | ARMV7_PERFCTR_CYCLES_INST_STALL = 0x56, | ||
1264 | ARMV7_PERFCTR_CYCLES_INST = 0x57, | ||
1265 | ARMV7_PERFCTR_CYCLES_NEON_DATA_STALL = 0x58, | ||
1266 | ARMV7_PERFCTR_CYCLES_NEON_INST_STALL = 0x59, | ||
1267 | ARMV7_PERFCTR_NEON_CYCLES = 0x5A, | ||
1268 | |||
1269 | ARMV7_PERFCTR_PMU0_EVENTS = 0x70, | ||
1270 | ARMV7_PERFCTR_PMU1_EVENTS = 0x71, | ||
1271 | ARMV7_PERFCTR_PMU_EVENTS = 0x72, | ||
1272 | }; | ||
1273 | |||
1274 | /* ARMv7 Cortex-A9 specific event types */ | ||
1275 | enum armv7_a9_perf_types { | ||
1276 | ARMV7_PERFCTR_JAVA_HW_BYTECODE_EXEC = 0x40, | ||
1277 | ARMV7_PERFCTR_JAVA_SW_BYTECODE_EXEC = 0x41, | ||
1278 | ARMV7_PERFCTR_JAZELLE_BRANCH_EXEC = 0x42, | ||
1279 | |||
1280 | ARMV7_PERFCTR_COHERENT_LINE_MISS = 0x50, | ||
1281 | ARMV7_PERFCTR_COHERENT_LINE_HIT = 0x51, | ||
1282 | |||
1283 | ARMV7_PERFCTR_ICACHE_DEP_STALL_CYCLES = 0x60, | ||
1284 | ARMV7_PERFCTR_DCACHE_DEP_STALL_CYCLES = 0x61, | ||
1285 | ARMV7_PERFCTR_TLB_MISS_DEP_STALL_CYCLES = 0x62, | ||
1286 | ARMV7_PERFCTR_STREX_EXECUTED_PASSED = 0x63, | ||
1287 | ARMV7_PERFCTR_STREX_EXECUTED_FAILED = 0x64, | ||
1288 | ARMV7_PERFCTR_DATA_EVICTION = 0x65, | ||
1289 | ARMV7_PERFCTR_ISSUE_STAGE_NO_INST = 0x66, | ||
1290 | ARMV7_PERFCTR_ISSUE_STAGE_EMPTY = 0x67, | ||
1291 | ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE = 0x68, | ||
1292 | |||
1293 | ARMV7_PERFCTR_PREDICTABLE_FUNCT_RETURNS = 0x6E, | ||
1294 | |||
1295 | ARMV7_PERFCTR_MAIN_UNIT_EXECUTED_INST = 0x70, | ||
1296 | ARMV7_PERFCTR_SECOND_UNIT_EXECUTED_INST = 0x71, | ||
1297 | ARMV7_PERFCTR_LD_ST_UNIT_EXECUTED_INST = 0x72, | ||
1298 | ARMV7_PERFCTR_FP_EXECUTED_INST = 0x73, | ||
1299 | ARMV7_PERFCTR_NEON_EXECUTED_INST = 0x74, | ||
1300 | |||
1301 | ARMV7_PERFCTR_PLD_FULL_DEP_STALL_CYCLES = 0x80, | ||
1302 | ARMV7_PERFCTR_DATA_WR_DEP_STALL_CYCLES = 0x81, | ||
1303 | ARMV7_PERFCTR_ITLB_MISS_DEP_STALL_CYCLES = 0x82, | ||
1304 | ARMV7_PERFCTR_DTLB_MISS_DEP_STALL_CYCLES = 0x83, | ||
1305 | ARMV7_PERFCTR_MICRO_ITLB_MISS_DEP_STALL_CYCLES = 0x84, | ||
1306 | ARMV7_PERFCTR_MICRO_DTLB_MISS_DEP_STALL_CYCLES = 0x85, | ||
1307 | ARMV7_PERFCTR_DMB_DEP_STALL_CYCLES = 0x86, | ||
1308 | |||
1309 | ARMV7_PERFCTR_INTGR_CLK_ENABLED_CYCLES = 0x8A, | ||
1310 | ARMV7_PERFCTR_DATA_ENGINE_CLK_EN_CYCLES = 0x8B, | ||
1311 | |||
1312 | ARMV7_PERFCTR_ISB_INST = 0x90, | ||
1313 | ARMV7_PERFCTR_DSB_INST = 0x91, | ||
1314 | ARMV7_PERFCTR_DMB_INST = 0x92, | ||
1315 | ARMV7_PERFCTR_EXT_INTERRUPTS = 0x93, | ||
1316 | |||
1317 | ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_COMPLETED = 0xA0, | ||
1318 | ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_SKIPPED = 0xA1, | ||
1319 | ARMV7_PERFCTR_PLE_FIFO_FLUSH = 0xA2, | ||
1320 | ARMV7_PERFCTR_PLE_RQST_COMPLETED = 0xA3, | ||
1321 | ARMV7_PERFCTR_PLE_FIFO_OVERFLOW = 0xA4, | ||
1322 | ARMV7_PERFCTR_PLE_RQST_PROG = 0xA5 | ||
1323 | }; | ||
1324 | |||
1325 | /* | ||
1326 | * Cortex-A8 HW events mapping | ||
1327 | * | ||
1328 | * The hardware events that we support. We do support cache operations but | ||
1329 | * we have Harvard caches and no way to combine instruction and data | ||
1330 | * accesses/misses in hardware. | ||
1331 | */ | ||
1332 | static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = { | ||
1333 | [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, | ||
1334 | [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, | ||
1335 | [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, | ||
1336 | [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, | ||
1337 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, | ||
1338 | [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | ||
1339 | [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES, | ||
1340 | }; | ||
1341 | |||
1342 | static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | ||
1343 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
1344 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | ||
1345 | [C(L1D)] = { | ||
1346 | /* | ||
1347 | * The performance counters don't differentiate between read | ||
1348 | * and write accesses/misses so this isn't strictly correct, | ||
1349 | * but it's the best we can do. Writes and reads get | ||
1350 | * combined. | ||
1351 | */ | ||
1352 | [C(OP_READ)] = { | ||
1353 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS, | ||
1354 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL, | ||
1355 | }, | ||
1356 | [C(OP_WRITE)] = { | ||
1357 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS, | ||
1358 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL, | ||
1359 | }, | ||
1360 | [C(OP_PREFETCH)] = { | ||
1361 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1362 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
1363 | }, | ||
1364 | }, | ||
1365 | [C(L1I)] = { | ||
1366 | [C(OP_READ)] = { | ||
1367 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST, | ||
1368 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS, | ||
1369 | }, | ||
1370 | [C(OP_WRITE)] = { | ||
1371 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST, | ||
1372 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS, | ||
1373 | }, | ||
1374 | [C(OP_PREFETCH)] = { | ||
1375 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1376 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
1377 | }, | ||
1378 | }, | ||
1379 | [C(LL)] = { | ||
1380 | [C(OP_READ)] = { | ||
1381 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS, | ||
1382 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS, | ||
1383 | }, | ||
1384 | [C(OP_WRITE)] = { | ||
1385 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS, | ||
1386 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS, | ||
1387 | }, | ||
1388 | [C(OP_PREFETCH)] = { | ||
1389 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1390 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
1391 | }, | ||
1392 | }, | ||
1393 | [C(DTLB)] = { | ||
1394 | /* | ||
1395 | * Only ITLB misses and DTLB refills are supported. | ||
1396 | * If users want the DTLB refill misses, a raw counter | ||
1397 | * must be used. | ||
1398 | */ | ||
1399 | [C(OP_READ)] = { | ||
1400 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1401 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | ||
1402 | }, | ||
1403 | [C(OP_WRITE)] = { | ||
1404 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1405 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | ||
1406 | }, | ||
1407 | [C(OP_PREFETCH)] = { | ||
1408 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1409 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
1410 | }, | ||
1411 | }, | ||
1412 | [C(ITLB)] = { | ||
1413 | [C(OP_READ)] = { | ||
1414 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1415 | [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, | ||
1416 | }, | ||
1417 | [C(OP_WRITE)] = { | ||
1418 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1419 | [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, | ||
1420 | }, | ||
1421 | [C(OP_PREFETCH)] = { | ||
1422 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1423 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
1424 | }, | ||
1425 | }, | ||
1426 | [C(BPU)] = { | ||
1427 | [C(OP_READ)] = { | ||
1428 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE, | ||
1429 | [C(RESULT_MISS)] | ||
1430 | = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | ||
1431 | }, | ||
1432 | [C(OP_WRITE)] = { | ||
1433 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE, | ||
1434 | [C(RESULT_MISS)] | ||
1435 | = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | ||
1436 | }, | ||
1437 | [C(OP_PREFETCH)] = { | ||
1438 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1439 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
1440 | }, | ||
1441 | }, | ||
1442 | }; | ||
1443 | |||
1444 | /* | ||
1445 | * Cortex-A9 HW events mapping | ||
1446 | */ | ||
1447 | static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = { | ||
1448 | [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, | ||
1449 | [PERF_COUNT_HW_INSTRUCTIONS] = | ||
1450 | ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE, | ||
1451 | [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_COHERENT_LINE_HIT, | ||
1452 | [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_COHERENT_LINE_MISS, | ||
1453 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, | ||
1454 | [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | ||
1455 | [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES, | ||
1456 | }; | ||
1457 | |||
1458 | static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | ||
1459 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
1460 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | ||
1461 | [C(L1D)] = { | ||
1462 | /* | ||
1463 | * The performance counters don't differentiate between read | ||
1464 | * and write accesses/misses so this isn't strictly correct, | ||
1465 | * but it's the best we can do. Writes and reads get | ||
1466 | * combined. | ||
1467 | */ | ||
1468 | [C(OP_READ)] = { | ||
1469 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS, | ||
1470 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL, | ||
1471 | }, | ||
1472 | [C(OP_WRITE)] = { | ||
1473 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS, | ||
1474 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL, | ||
1475 | }, | ||
1476 | [C(OP_PREFETCH)] = { | ||
1477 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1478 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
1479 | }, | ||
1480 | }, | ||
1481 | [C(L1I)] = { | ||
1482 | [C(OP_READ)] = { | ||
1483 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1484 | [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, | ||
1485 | }, | ||
1486 | [C(OP_WRITE)] = { | ||
1487 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1488 | [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, | ||
1489 | }, | ||
1490 | [C(OP_PREFETCH)] = { | ||
1491 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1492 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
1493 | }, | ||
1494 | }, | ||
1495 | [C(LL)] = { | ||
1496 | [C(OP_READ)] = { | ||
1497 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1498 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
1499 | }, | ||
1500 | [C(OP_WRITE)] = { | ||
1501 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1502 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
1503 | }, | ||
1504 | [C(OP_PREFETCH)] = { | ||
1505 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1506 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
1507 | }, | ||
1508 | }, | ||
1509 | [C(DTLB)] = { | ||
1510 | /* | ||
1511 | * Only ITLB misses and DTLB refills are supported. | ||
1512 | * If users want the DTLB refill misses, a raw counter | ||
1513 | * must be used. | ||
1514 | */ | ||
1515 | [C(OP_READ)] = { | ||
1516 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1517 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | ||
1518 | }, | ||
1519 | [C(OP_WRITE)] = { | ||
1520 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1521 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | ||
1522 | }, | ||
1523 | [C(OP_PREFETCH)] = { | ||
1524 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1525 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
1526 | }, | ||
1527 | }, | ||
1528 | [C(ITLB)] = { | ||
1529 | [C(OP_READ)] = { | ||
1530 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1531 | [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, | ||
1532 | }, | ||
1533 | [C(OP_WRITE)] = { | ||
1534 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1535 | [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, | ||
1536 | }, | ||
1537 | [C(OP_PREFETCH)] = { | ||
1538 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1539 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
1540 | }, | ||
1541 | }, | ||
1542 | [C(BPU)] = { | ||
1543 | [C(OP_READ)] = { | ||
1544 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE, | ||
1545 | [C(RESULT_MISS)] | ||
1546 | = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | ||
1547 | }, | ||
1548 | [C(OP_WRITE)] = { | ||
1549 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE, | ||
1550 | [C(RESULT_MISS)] | ||
1551 | = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | ||
1552 | }, | ||
1553 | [C(OP_PREFETCH)] = { | ||
1554 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1555 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
1556 | }, | ||
1557 | }, | ||
1558 | }; | ||
1559 | |||
1560 | /* | ||
1561 | * Perf Events counters | ||
1562 | */ | ||
1563 | enum armv7_counters { | ||
1564 | ARMV7_CYCLE_COUNTER = 1, /* Cycle counter */ | ||
1565 | ARMV7_COUNTER0 = 2, /* First event counter */ | ||
1566 | }; | ||
1567 | |||
1568 | /* | ||
1569 | * The cycle counter is ARMV7_CYCLE_COUNTER. | ||
1570 | * The first event counter is ARMV7_COUNTER0. | ||
1571 | * The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1). | ||
1572 | */ | ||
1573 | #define ARMV7_COUNTER_LAST (ARMV7_COUNTER0 + armpmu->num_events - 1) | ||
1574 | |||
1575 | /* | ||
1576 | * ARMv7 low level PMNC access | ||
1577 | */ | ||
1578 | |||
1579 | /* | ||
1580 | * Per-CPU PMNC: config reg | ||
1581 | */ | ||
1582 | #define ARMV7_PMNC_E (1 << 0) /* Enable all counters */ | ||
1583 | #define ARMV7_PMNC_P (1 << 1) /* Reset all counters */ | ||
1584 | #define ARMV7_PMNC_C (1 << 2) /* Cycle counter reset */ | ||
1585 | #define ARMV7_PMNC_D (1 << 3) /* CCNT counts every 64th cpu cycle */ | ||
1586 | #define ARMV7_PMNC_X (1 << 4) /* Export to ETM */ | ||
1587 | #define ARMV7_PMNC_DP (1 << 5) /* Disable CCNT if non-invasive debug */ | ||
1588 | #define ARMV7_PMNC_N_SHIFT 11 /* Number of counters supported */ | ||
1589 | #define ARMV7_PMNC_N_MASK 0x1f | ||
1590 | #define ARMV7_PMNC_MASK 0x3f /* Mask for writable bits */ | ||
1591 | |||
1592 | /* | ||
1593 | * Available counters | ||
1594 | */ | ||
1595 | #define ARMV7_CNT0 0 /* First event counter */ | ||
1596 | #define ARMV7_CCNT 31 /* Cycle counter */ | ||
1597 | |||
1598 | /* Perf Event to low level counters mapping */ | ||
1599 | #define ARMV7_EVENT_CNT_TO_CNTx (ARMV7_COUNTER0 - ARMV7_CNT0) | ||
1600 | |||
1601 | /* | ||
1602 | * CNTENS: counters enable reg | ||
1603 | */ | ||
1604 | #define ARMV7_CNTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) | ||
1605 | #define ARMV7_CNTENS_C (1 << ARMV7_CCNT) | ||
1606 | |||
1607 | /* | ||
1608 | * CNTENC: counters disable reg | ||
1609 | */ | ||
1610 | #define ARMV7_CNTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) | ||
1611 | #define ARMV7_CNTENC_C (1 << ARMV7_CCNT) | ||
1612 | |||
1613 | /* | ||
1614 | * INTENS: counters overflow interrupt enable reg | ||
1615 | */ | ||
1616 | #define ARMV7_INTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) | ||
1617 | #define ARMV7_INTENS_C (1 << ARMV7_CCNT) | ||
1618 | |||
1619 | /* | ||
1620 | * INTENC: counters overflow interrupt disable reg | ||
1621 | */ | ||
1622 | #define ARMV7_INTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) | ||
1623 | #define ARMV7_INTENC_C (1 << ARMV7_CCNT) | ||
1624 | |||
1625 | /* | ||
1626 | * EVTSEL: Event selection reg | ||
1627 | */ | ||
1628 | #define ARMV7_EVTSEL_MASK 0xff /* Mask for writable bits */ | ||
1629 | |||
1630 | /* | ||
1631 | * SELECT: Counter selection reg | ||
1632 | */ | ||
1633 | #define ARMV7_SELECT_MASK 0x1f /* Mask for writable bits */ | ||
1634 | |||
1635 | /* | ||
1636 | * FLAG: counters overflow flag status reg | ||
1637 | */ | ||
1638 | #define ARMV7_FLAG_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) | ||
1639 | #define ARMV7_FLAG_C (1 << ARMV7_CCNT) | ||
1640 | #define ARMV7_FLAG_MASK 0xffffffff /* Mask for writable bits */ | ||
1641 | #define ARMV7_OVERFLOWED_MASK ARMV7_FLAG_MASK | ||
1642 | |||
1643 | static inline unsigned long armv7_pmnc_read(void) | ||
1644 | { | ||
1645 | u32 val; | ||
1646 | asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val)); | ||
1647 | return val; | ||
1648 | } | ||
1649 | |||
1650 | static inline void armv7_pmnc_write(unsigned long val) | ||
1651 | { | ||
1652 | val &= ARMV7_PMNC_MASK; | ||
1653 | asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val)); | ||
1654 | } | ||
1655 | |||
1656 | static inline int armv7_pmnc_has_overflowed(unsigned long pmnc) | ||
1657 | { | ||
1658 | return pmnc & ARMV7_OVERFLOWED_MASK; | ||
1659 | } | ||
1660 | |||
1661 | static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc, | ||
1662 | enum armv7_counters counter) | ||
1663 | { | ||
1664 | int ret = 0; | ||
1665 | |||
1666 | if (counter == ARMV7_CYCLE_COUNTER) | ||
1667 | ret = pmnc & ARMV7_FLAG_C; | ||
1668 | else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST)) | ||
1669 | ret = pmnc & ARMV7_FLAG_P(counter); | ||
1670 | else | ||
1671 | pr_err("CPU%u checking wrong counter %d overflow status\n", | ||
1672 | smp_processor_id(), counter); | ||
1673 | |||
1674 | return ret; | ||
1675 | } | ||
1676 | |||
1677 | static inline int armv7_pmnc_select_counter(unsigned int idx) | ||
1678 | { | ||
1679 | u32 val; | ||
1680 | |||
1681 | if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) { | ||
1682 | pr_err("CPU%u selecting wrong PMNC counter" | ||
1683 | " %d\n", smp_processor_id(), idx); | ||
1684 | return -1; | ||
1685 | } | ||
1686 | |||
1687 | val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK; | ||
1688 | asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val)); | ||
1689 | |||
1690 | return idx; | ||
1691 | } | ||
1692 | |||
1693 | static inline u32 armv7pmu_read_counter(int idx) | ||
1694 | { | ||
1695 | unsigned long value = 0; | ||
1696 | |||
1697 | if (idx == ARMV7_CYCLE_COUNTER) | ||
1698 | asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value)); | ||
1699 | else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) { | ||
1700 | if (armv7_pmnc_select_counter(idx) == idx) | ||
1701 | asm volatile("mrc p15, 0, %0, c9, c13, 2" | ||
1702 | : "=r" (value)); | ||
1703 | } else | ||
1704 | pr_err("CPU%u reading wrong counter %d\n", | ||
1705 | smp_processor_id(), idx); | ||
1706 | |||
1707 | return value; | ||
1708 | } | ||
1709 | |||
1710 | static inline void armv7pmu_write_counter(int idx, u32 value) | ||
1711 | { | ||
1712 | if (idx == ARMV7_CYCLE_COUNTER) | ||
1713 | asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value)); | ||
1714 | else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) { | ||
1715 | if (armv7_pmnc_select_counter(idx) == idx) | ||
1716 | asm volatile("mcr p15, 0, %0, c9, c13, 2" | ||
1717 | : : "r" (value)); | ||
1718 | } else | ||
1719 | pr_err("CPU%u writing wrong counter %d\n", | ||
1720 | smp_processor_id(), idx); | ||
1721 | } | ||
1722 | |||
1723 | static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val) | ||
1724 | { | ||
1725 | if (armv7_pmnc_select_counter(idx) == idx) { | ||
1726 | val &= ARMV7_EVTSEL_MASK; | ||
1727 | asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val)); | ||
1728 | } | ||
1729 | } | ||
1730 | |||
1731 | static inline u32 armv7_pmnc_enable_counter(unsigned int idx) | ||
1732 | { | ||
1733 | u32 val; | ||
1734 | |||
1735 | if ((idx != ARMV7_CYCLE_COUNTER) && | ||
1736 | ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { | ||
1737 | pr_err("CPU%u enabling wrong PMNC counter" | ||
1738 | " %d\n", smp_processor_id(), idx); | ||
1739 | return -1; | ||
1740 | } | ||
1741 | |||
1742 | if (idx == ARMV7_CYCLE_COUNTER) | ||
1743 | val = ARMV7_CNTENS_C; | ||
1744 | else | ||
1745 | val = ARMV7_CNTENS_P(idx); | ||
1746 | |||
1747 | asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val)); | ||
1748 | |||
1749 | return idx; | ||
1750 | } | ||
1751 | |||
1752 | static inline u32 armv7_pmnc_disable_counter(unsigned int idx) | ||
1753 | { | ||
1754 | u32 val; | ||
1755 | |||
1756 | |||
1757 | if ((idx != ARMV7_CYCLE_COUNTER) && | ||
1758 | ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { | ||
1759 | pr_err("CPU%u disabling wrong PMNC counter" | ||
1760 | " %d\n", smp_processor_id(), idx); | ||
1761 | return -1; | ||
1762 | } | ||
1763 | |||
1764 | if (idx == ARMV7_CYCLE_COUNTER) | ||
1765 | val = ARMV7_CNTENC_C; | ||
1766 | else | ||
1767 | val = ARMV7_CNTENC_P(idx); | ||
1768 | |||
1769 | asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val)); | ||
1770 | |||
1771 | return idx; | ||
1772 | } | ||
1773 | |||
1774 | static inline u32 armv7_pmnc_enable_intens(unsigned int idx) | ||
1775 | { | ||
1776 | u32 val; | ||
1777 | |||
1778 | if ((idx != ARMV7_CYCLE_COUNTER) && | ||
1779 | ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { | ||
1780 | pr_err("CPU%u enabling wrong PMNC counter" | ||
1781 | " interrupt enable %d\n", smp_processor_id(), idx); | ||
1782 | return -1; | ||
1783 | } | ||
1784 | |||
1785 | if (idx == ARMV7_CYCLE_COUNTER) | ||
1786 | val = ARMV7_INTENS_C; | ||
1787 | else | ||
1788 | val = ARMV7_INTENS_P(idx); | ||
1789 | |||
1790 | asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val)); | ||
1791 | |||
1792 | return idx; | ||
1793 | } | ||
1794 | |||
1795 | static inline u32 armv7_pmnc_disable_intens(unsigned int idx) | ||
1796 | { | ||
1797 | u32 val; | ||
1798 | |||
1799 | if ((idx != ARMV7_CYCLE_COUNTER) && | ||
1800 | ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { | ||
1801 | pr_err("CPU%u disabling wrong PMNC counter" | ||
1802 | " interrupt enable %d\n", smp_processor_id(), idx); | ||
1803 | return -1; | ||
1804 | } | ||
1805 | |||
1806 | if (idx == ARMV7_CYCLE_COUNTER) | ||
1807 | val = ARMV7_INTENC_C; | ||
1808 | else | ||
1809 | val = ARMV7_INTENC_P(idx); | ||
1810 | |||
1811 | asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val)); | ||
1812 | |||
1813 | return idx; | ||
1814 | } | ||
1815 | |||
1816 | static inline u32 armv7_pmnc_getreset_flags(void) | ||
1817 | { | ||
1818 | u32 val; | ||
1819 | |||
1820 | /* Read */ | ||
1821 | asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val)); | ||
1822 | |||
1823 | /* Write to clear flags */ | ||
1824 | val &= ARMV7_FLAG_MASK; | ||
1825 | asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val)); | ||
1826 | |||
1827 | return val; | ||
1828 | } | ||
1829 | |||
1830 | #ifdef DEBUG | ||
1831 | static void armv7_pmnc_dump_regs(void) | ||
1832 | { | ||
1833 | u32 val; | ||
1834 | unsigned int cnt; | ||
1835 | |||
1836 | printk(KERN_INFO "PMNC registers dump:\n"); | ||
1837 | |||
1838 | asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val)); | ||
1839 | printk(KERN_INFO "PMNC =0x%08x\n", val); | ||
1840 | |||
1841 | asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val)); | ||
1842 | printk(KERN_INFO "CNTENS=0x%08x\n", val); | ||
1843 | |||
1844 | asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val)); | ||
1845 | printk(KERN_INFO "INTENS=0x%08x\n", val); | ||
1846 | |||
1847 | asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val)); | ||
1848 | printk(KERN_INFO "FLAGS =0x%08x\n", val); | ||
1849 | |||
1850 | asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val)); | ||
1851 | printk(KERN_INFO "SELECT=0x%08x\n", val); | ||
1852 | |||
1853 | asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val)); | ||
1854 | printk(KERN_INFO "CCNT =0x%08x\n", val); | ||
1855 | |||
1856 | for (cnt = ARMV7_COUNTER0; cnt < ARMV7_COUNTER_LAST; cnt++) { | ||
1857 | armv7_pmnc_select_counter(cnt); | ||
1858 | asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val)); | ||
1859 | printk(KERN_INFO "CNT[%d] count =0x%08x\n", | ||
1860 | cnt-ARMV7_EVENT_CNT_TO_CNTx, val); | ||
1861 | asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val)); | ||
1862 | printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n", | ||
1863 | cnt-ARMV7_EVENT_CNT_TO_CNTx, val); | ||
1864 | } | ||
1865 | } | ||
1866 | #endif | ||
1867 | |||
1868 | void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx) | ||
1869 | { | ||
1870 | unsigned long flags; | ||
1871 | |||
1872 | /* | ||
1873 | * Enable counter and interrupt, and set the counter to count | ||
1874 | * the event that we're interested in. | ||
1875 | */ | ||
1876 | spin_lock_irqsave(&pmu_lock, flags); | ||
1877 | |||
1878 | /* | ||
1879 | * Disable counter | ||
1880 | */ | ||
1881 | armv7_pmnc_disable_counter(idx); | ||
1882 | |||
1883 | /* | ||
1884 | * Set event (if destined for PMNx counters). | ||
1885 | * We don't need to set the event for the cycle counter. | ||
1886 | */ | ||
1887 | if (idx != ARMV7_CYCLE_COUNTER) | ||
1888 | armv7_pmnc_write_evtsel(idx, hwc->config_base); | ||
1889 | |||
1890 | /* | ||
1891 | * Enable interrupt for this counter | ||
1892 | */ | ||
1893 | armv7_pmnc_enable_intens(idx); | ||
1894 | |||
1895 | /* | ||
1896 | * Enable counter | ||
1897 | */ | ||
1898 | armv7_pmnc_enable_counter(idx); | ||
1899 | |||
1900 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
1901 | } | ||
1902 | |||
1903 | static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx) | ||
1904 | { | ||
1905 | unsigned long flags; | ||
1906 | |||
1907 | /* | ||
1908 | * Disable counter and interrupt | ||
1909 | */ | ||
1910 | spin_lock_irqsave(&pmu_lock, flags); | ||
1911 | |||
1912 | /* | ||
1913 | * Disable counter | ||
1914 | */ | ||
1915 | armv7_pmnc_disable_counter(idx); | ||
1916 | |||
1917 | /* | ||
1918 | * Disable interrupt for this counter | ||
1919 | */ | ||
1920 | armv7_pmnc_disable_intens(idx); | ||
1921 | |||
1922 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
1923 | } | ||
1924 | |||
1925 | static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) | ||
1926 | { | ||
1927 | unsigned long pmnc; | ||
1928 | struct perf_sample_data data; | ||
1929 | struct cpu_hw_events *cpuc; | ||
1930 | struct pt_regs *regs; | ||
1931 | int idx; | ||
1932 | |||
1933 | /* | ||
1934 | * Get and reset the IRQ flags | ||
1935 | */ | ||
1936 | pmnc = armv7_pmnc_getreset_flags(); | ||
1937 | |||
1938 | /* | ||
1939 | * Did an overflow occur? | ||
1940 | */ | ||
1941 | if (!armv7_pmnc_has_overflowed(pmnc)) | ||
1942 | return IRQ_NONE; | ||
1943 | |||
1944 | /* | ||
1945 | * Handle the counter(s) overflow(s) | ||
1946 | */ | ||
1947 | regs = get_irq_regs(); | ||
1948 | |||
1949 | perf_sample_data_init(&data, 0); | ||
1950 | |||
1951 | cpuc = &__get_cpu_var(cpu_hw_events); | ||
1952 | for (idx = 0; idx <= armpmu->num_events; ++idx) { | ||
1953 | struct perf_event *event = cpuc->events[idx]; | ||
1954 | struct hw_perf_event *hwc; | ||
1955 | |||
1956 | if (!test_bit(idx, cpuc->active_mask)) | ||
1957 | continue; | ||
1958 | |||
1959 | /* | ||
1960 | * We have a single interrupt for all counters. Check that | ||
1961 | * each counter has overflowed before we process it. | ||
1962 | */ | ||
1963 | if (!armv7_pmnc_counter_has_overflowed(pmnc, idx)) | ||
1964 | continue; | ||
1965 | |||
1966 | hwc = &event->hw; | ||
1967 | armpmu_event_update(event, hwc, idx); | ||
1968 | data.period = event->hw.last_period; | ||
1969 | if (!armpmu_event_set_period(event, hwc, idx)) | ||
1970 | continue; | ||
1971 | |||
1972 | if (perf_event_overflow(event, 0, &data, regs)) | ||
1973 | armpmu->disable(hwc, idx); | ||
1974 | } | ||
1975 | |||
1976 | /* | ||
1977 | * Handle the pending perf events. | ||
1978 | * | ||
1979 | * Note: this call *must* be run with interrupts enabled. For | ||
1980 | * platforms that can have the PMU interrupts raised as an NMI, this | ||
1981 | * will not work. | ||
1982 | */ | ||
1983 | perf_event_do_pending(); | ||
1984 | |||
1985 | return IRQ_HANDLED; | ||
1986 | } | ||
1987 | |||
1988 | static void armv7pmu_start(void) | ||
1989 | { | ||
1990 | unsigned long flags; | ||
1991 | |||
1992 | spin_lock_irqsave(&pmu_lock, flags); | ||
1993 | /* Enable all counters */ | ||
1994 | armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E); | ||
1995 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
1996 | } | ||
1997 | |||
1998 | static void armv7pmu_stop(void) | ||
1999 | { | ||
2000 | unsigned long flags; | ||
2001 | |||
2002 | spin_lock_irqsave(&pmu_lock, flags); | ||
2003 | /* Disable all counters */ | ||
2004 | armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E); | ||
2005 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
2006 | } | ||
2007 | |||
2008 | static inline int armv7_a8_pmu_event_map(int config) | ||
2009 | { | ||
2010 | int mapping = armv7_a8_perf_map[config]; | ||
2011 | if (HW_OP_UNSUPPORTED == mapping) | ||
2012 | mapping = -EOPNOTSUPP; | ||
2013 | return mapping; | ||
2014 | } | ||
2015 | |||
2016 | static inline int armv7_a9_pmu_event_map(int config) | ||
2017 | { | ||
2018 | int mapping = armv7_a9_perf_map[config]; | ||
2019 | if (HW_OP_UNSUPPORTED == mapping) | ||
2020 | mapping = -EOPNOTSUPP; | ||
2021 | return mapping; | ||
2022 | } | ||
2023 | |||
2024 | static u64 armv7pmu_raw_event(u64 config) | ||
2025 | { | ||
2026 | return config & 0xff; | ||
2027 | } | ||
2028 | |||
2029 | static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc, | ||
2030 | struct hw_perf_event *event) | ||
2031 | { | ||
2032 | int idx; | ||
2033 | |||
2034 | /* Always place a cycle-count event into the cycle counter. */ | ||
2035 | if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) { | ||
2036 | if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask)) | ||
2037 | return -EAGAIN; | ||
2038 | |||
2039 | return ARMV7_CYCLE_COUNTER; | ||
2040 | } else { | ||
2041 | /* | ||
2042 | * For anything other than a cycle count, try to use | ||
2043 | * the event counters. | ||
2044 | */ | ||
2045 | for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) { | ||
2046 | if (!test_and_set_bit(idx, cpuc->used_mask)) | ||
2047 | return idx; | ||
2048 | } | ||
2049 | |||
2050 | /* The counters are all in use. */ | ||
2051 | return -EAGAIN; | ||
2052 | } | ||
2053 | } | ||
2054 | |||
2055 | static struct arm_pmu armv7pmu = { | ||
2056 | .handle_irq = armv7pmu_handle_irq, | ||
2057 | .enable = armv7pmu_enable_event, | ||
2058 | .disable = armv7pmu_disable_event, | ||
2059 | .raw_event = armv7pmu_raw_event, | ||
2060 | .read_counter = armv7pmu_read_counter, | ||
2061 | .write_counter = armv7pmu_write_counter, | ||
2062 | .get_event_idx = armv7pmu_get_event_idx, | ||
2063 | .start = armv7pmu_start, | ||
2064 | .stop = armv7pmu_stop, | ||
2065 | .max_period = (1LLU << 32) - 1, | ||
2066 | }; | ||
2067 | |||
2068 | static u32 __init armv7_reset_read_pmnc(void) | ||
2069 | { | ||
2070 | u32 nb_cnt; | ||
2071 | |||
2072 | /* Initialize & Reset PMNC: C and P bits */ | ||
2073 | armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C); | ||
2074 | |||
2075 | /* Read the number of CNTx counters supported from PMNC */ | ||
2076 | nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK; | ||
2077 | |||
2078 | /* Add the CPU cycles counter and return */ | ||
2079 | return nb_cnt + 1; | ||
2080 | } | ||
2081 | |||
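For a concrete reading of the arithmetic (the counter count is implementation-defined, so the value below is only an example): a core with four event counters reports 4 in PMNC[15:11], and the helper returns 5 once CCNT is added:

	u32 pmnc = 4 << ARMV7_PMNC_N_SHIFT;	/* hypothetical PMNC with N == 4 */
	u32 nb_cnt = (pmnc >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;	/* == 4 */
	/* armv7_reset_read_pmnc() would return nb_cnt + 1 == 5 */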
2082 | static int __init | ||
2083 | init_hw_perf_events(void) | ||
2084 | { | ||
2085 | unsigned long cpuid = read_cpuid_id(); | ||
2086 | unsigned long implementor = (cpuid & 0xFF000000) >> 24; | ||
2087 | unsigned long part_number = (cpuid & 0xFFF0); | ||
2088 | |||
2089 | /* We only support ARM CPUs implemented by ARM at the moment. */ | ||
2090 | if (0x41 == implementor) { | ||
2091 | switch (part_number) { | ||
2092 | case 0xB360: /* ARM1136 */ | ||
2093 | case 0xB560: /* ARM1156 */ | ||
2094 | case 0xB760: /* ARM1176 */ | ||
2095 | armpmu = &armv6pmu; | ||
2096 | memcpy(armpmu_perf_cache_map, armv6_perf_cache_map, | ||
2097 | sizeof(armv6_perf_cache_map)); | ||
2098 | perf_max_events = armv6pmu.num_events; | ||
2099 | break; | ||
2100 | case 0xB020: /* ARM11mpcore */ | ||
2101 | armpmu = &armv6mpcore_pmu; | ||
2102 | memcpy(armpmu_perf_cache_map, | ||
2103 | armv6mpcore_perf_cache_map, | ||
2104 | sizeof(armv6mpcore_perf_cache_map)); | ||
2105 | perf_max_events = armv6mpcore_pmu.num_events; | ||
2106 | break; | ||
2107 | case 0xC080: /* Cortex-A8 */ | ||
2108 | armv7pmu.name = ARMV7_PMU_CORTEX_A8_NAME; | ||
2109 | memcpy(armpmu_perf_cache_map, armv7_a8_perf_cache_map, | ||
2110 | sizeof(armv7_a8_perf_cache_map)); | ||
2111 | armv7pmu.event_map = armv7_a8_pmu_event_map; | ||
2112 | armpmu = &armv7pmu; | ||
2113 | |||
2114 | /* Reset PMNC and read the number of CNTx counters | ||
2115 | supported */ | ||
2116 | armv7pmu.num_events = armv7_reset_read_pmnc(); | ||
2117 | perf_max_events = armv7pmu.num_events; | ||
2118 | break; | ||
2119 | case 0xC090: /* Cortex-A9 */ | ||
2120 | armv7pmu.name = ARMV7_PMU_CORTEX_A9_NAME; | ||
2121 | memcpy(armpmu_perf_cache_map, armv7_a9_perf_cache_map, | ||
2122 | sizeof(armv7_a9_perf_cache_map)); | ||
2123 | armv7pmu.event_map = armv7_a9_pmu_event_map; | ||
2124 | armpmu = &armv7pmu; | ||
2125 | |||
2126 | /* Reset PMNC and read the number of CNTx counters | ||
2127 | supported */ | ||
2128 | armv7pmu.num_events = armv7_reset_read_pmnc(); | ||
2129 | perf_max_events = armv7pmu.num_events; | ||
2130 | break; | ||
2131 | default: | ||
2132 | pr_info("no hardware support available\n"); | ||
2133 | perf_max_events = -1; | ||
2134 | } | ||
2135 | } | ||
2136 | |||
2137 | if (armpmu) | ||
2138 | pr_info("enabled with %s PMU driver, %d counters available\n", | ||
2139 | armpmu->name, armpmu->num_events); | ||
2140 | |||
2141 | return 0; | ||
2142 | } | ||
2143 | arch_initcall(init_hw_perf_events); | ||
2144 | |||
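A short sketch of the CPUID decode performed in init_hw_perf_events() (the MIDR value is hypothetical, chosen to resemble a Cortex-A9):

	unsigned long cpuid = 0x411fc090;	/* hypothetical Cortex-A9 MIDR */
	unsigned long implementor = (cpuid & 0xFF000000) >> 24;	/* 0x41, ASCII 'A' for ARM Ltd */
	unsigned long part_number = (cpuid & 0xFFF0);	/* 0xC090, the Cortex-A9 case above */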
2145 | /* | ||
2146 | * Callchain handling code. | ||
2147 | */ | ||
2148 | static inline void | ||
2149 | callchain_store(struct perf_callchain_entry *entry, | ||
2150 | u64 ip) | ||
2151 | { | ||
2152 | if (entry->nr < PERF_MAX_STACK_DEPTH) | ||
2153 | entry->ip[entry->nr++] = ip; | ||
2154 | } | ||
2155 | |||
2156 | /* | ||
2157 | * The registers we're interested in are at the end of the variable | ||
2158 | * length saved register structure. The fp points at the end of this | ||
2159 | * structure so the address of this struct is: | ||
2160 | * (struct frame_tail *)(xxx->fp)-1 | ||
2161 | * | ||
2162 | * This code has been adapted from the ARM OProfile support. | ||
2163 | */ | ||
2164 | struct frame_tail { | ||
2165 | struct frame_tail *fp; | ||
2166 | unsigned long sp; | ||
2167 | unsigned long lr; | ||
2168 | } __attribute__((packed)); | ||
2169 | |||
2170 | /* | ||
2171 | * Get the return address for a single stackframe and return a pointer to the | ||
2172 | * next frame tail. | ||
2173 | */ | ||
2174 | static struct frame_tail * | ||
2175 | user_backtrace(struct frame_tail *tail, | ||
2176 | struct perf_callchain_entry *entry) | ||
2177 | { | ||
2178 | struct frame_tail buftail; | ||
2179 | |||
2180 | /* Also check accessibility of one struct frame_tail beyond */ | ||
2181 | if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) | ||
2182 | return NULL; | ||
2183 | if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail))) | ||
2184 | return NULL; | ||
2185 | |||
2186 | callchain_store(entry, buftail.lr); | ||
2187 | |||
2188 | /* | ||
2189 | * Frame pointers should strictly progress back up the stack | ||
2190 | * (towards higher addresses). | ||
2191 | */ | ||
2192 | if (tail >= buftail.fp) | ||
2193 | return NULL; | ||
2194 | |||
2195 | return buftail.fp - 1; | ||
2196 | } | ||
2197 | |||
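user_backtrace() only works if user space was built with frame pointers laid out this way (an assumption the kernel cannot verify; binaries compiled without them simply yield short callchains). The geometry implied by the comment above, sketched with higher addresses at the top:

	/*
	 *	<- fp register points here (one word past the saved lr)
	 *	lr	saved return address	\
	 *	sp	saved stack pointer	 } struct frame_tail
	 *	fp	caller's frame pointer	/
	 *
	 * hence tail = (struct frame_tail *)fp - 1, and each buftail.fp - 1
	 * steps to the caller's frame.
	 */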
2198 | static void | ||
2199 | perf_callchain_user(struct pt_regs *regs, | ||
2200 | struct perf_callchain_entry *entry) | ||
2201 | { | ||
2202 | struct frame_tail *tail; | ||
2203 | |||
2204 | callchain_store(entry, PERF_CONTEXT_USER); | ||
2205 | |||
2206 | if (!user_mode(regs)) | ||
2207 | regs = task_pt_regs(current); | ||
2208 | |||
2209 | tail = (struct frame_tail *)regs->ARM_fp - 1; | ||
2210 | |||
2211 | while (tail && !((unsigned long)tail & 0x3)) | ||
2212 | tail = user_backtrace(tail, entry); | ||
2213 | } | ||
2214 | |||
2215 | /* | ||
2216 | * Gets called by walk_stackframe() for every stackframe. This will be called | ||
2217 | * whilst unwinding the stackframe and is like a subroutine return, so we use | ||
2218 | * the PC. | ||
2219 | */ | ||
2220 | static int | ||
2221 | callchain_trace(struct stackframe *fr, | ||
2222 | void *data) | ||
2223 | { | ||
2224 | struct perf_callchain_entry *entry = data; | ||
2225 | callchain_store(entry, fr->pc); | ||
2226 | return 0; | ||
2227 | } | ||
2228 | |||
2229 | static void | ||
2230 | perf_callchain_kernel(struct pt_regs *regs, | ||
2231 | struct perf_callchain_entry *entry) | ||
2232 | { | ||
2233 | struct stackframe fr; | ||
2234 | |||
2235 | callchain_store(entry, PERF_CONTEXT_KERNEL); | ||
2236 | fr.fp = regs->ARM_fp; | ||
2237 | fr.sp = regs->ARM_sp; | ||
2238 | fr.lr = regs->ARM_lr; | ||
2239 | fr.pc = regs->ARM_pc; | ||
2240 | walk_stackframe(&fr, callchain_trace, entry); | ||
2241 | } | ||
2242 | |||
2243 | static void | ||
2244 | perf_do_callchain(struct pt_regs *regs, | ||
2245 | struct perf_callchain_entry *entry) | ||
2246 | { | ||
2247 | int is_user; | ||
2248 | |||
2249 | if (!regs) | ||
2250 | return; | ||
2251 | |||
2252 | is_user = user_mode(regs); | ||
2253 | |||
2254 | if (!current || !current->pid) | ||
2255 | return; | ||
2256 | |||
2257 | if (is_user && current->state != TASK_RUNNING) | ||
2258 | return; | ||
2259 | |||
2260 | if (!is_user) | ||
2261 | perf_callchain_kernel(regs, entry); | ||
2262 | |||
2263 | if (current->mm) | ||
2264 | perf_callchain_user(regs, entry); | ||
2265 | } | ||
2266 | |||
2267 | static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry); | ||
2268 | |||
2269 | struct perf_callchain_entry * | ||
2270 | perf_callchain(struct pt_regs *regs) | ||
2271 | { | ||
2272 | struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry); | ||
2273 | |||
2274 | entry->nr = 0; | ||
2275 | perf_do_callchain(regs, entry); | ||
2276 | return entry; | ||
2277 | } | ||
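To exercise this driver from user space, a sketch against the perf_event syscall ABI (error handling elided; the event below is steered to CCNT by armv7pmu_get_event_idx()):

	#include <linux/perf_event.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/syscall.h>

	int main(void)
	{
		struct perf_event_attr attr;
		unsigned long long cycles;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.type = PERF_TYPE_HARDWARE;
		attr.size = sizeof(attr);
		attr.config = PERF_COUNT_HW_CPU_CYCLES;	/* -> ARMV7_PERFCTR_CPU_CYCLES */
		attr.disabled = 1;

		fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);	/* this task, any CPU */
		ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
		/* ... workload under measurement ... */
		ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
		read(fd, &cycles, sizeof(cycles));
		printf("cycles: %llu\n", cycles);
		return 0;
	}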
diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c new file mode 100644 index 000000000000..a124312e343f --- /dev/null +++ b/arch/arm/kernel/pmu.c | |||
@@ -0,0 +1,103 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/kernel/pmu.c | ||
3 | * | ||
4 | * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | #include <linux/cpumask.h> | ||
13 | #include <linux/err.h> | ||
14 | #include <linux/interrupt.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/module.h> | ||
17 | |||
18 | #include <asm/pmu.h> | ||
19 | |||
20 | /* | ||
21 | * Define the IRQs for the system. We could use something like a platform | ||
22 | * device but that seems fairly heavyweight for this. Also, the performance | ||
23 | * counters can't be removed or hotplugged. | ||
24 | * | ||
25 | * Ordering is important: init_pmu() will use the ordering to set the affinity | ||
26 | * to the corresponding core, e.g. the first interrupt goes to cpu 0, the | ||
27 | * second to cpu 1, etc. | ||
28 | */ | ||
29 | static const int irqs[] = { | ||
30 | #if defined(CONFIG_ARCH_OMAP2) | ||
31 | 3, | ||
32 | #elif defined(CONFIG_ARCH_BCMRING) | ||
33 | IRQ_PMUIRQ, | ||
34 | #elif defined(CONFIG_MACH_REALVIEW_EB) | ||
35 | IRQ_EB11MP_PMU_CPU0, | ||
36 | IRQ_EB11MP_PMU_CPU1, | ||
37 | IRQ_EB11MP_PMU_CPU2, | ||
38 | IRQ_EB11MP_PMU_CPU3, | ||
39 | #elif defined(CONFIG_ARCH_OMAP3) | ||
40 | INT_34XX_BENCH_MPU_EMUL, | ||
41 | #elif defined(CONFIG_ARCH_IOP32X) | ||
42 | IRQ_IOP32X_CORE_PMU, | ||
43 | #elif defined(CONFIG_ARCH_IOP33X) | ||
44 | IRQ_IOP33X_CORE_PMU, | ||
45 | #elif defined(CONFIG_ARCH_PXA) | ||
46 | IRQ_PMU, | ||
47 | #endif | ||
48 | }; | ||
49 | |||
50 | static const struct pmu_irqs pmu_irqs = { | ||
51 | .irqs = irqs, | ||
52 | .num_irqs = ARRAY_SIZE(irqs), | ||
53 | }; | ||
54 | |||
55 | static volatile long pmu_lock; | ||
56 | |||
57 | const struct pmu_irqs * | ||
58 | reserve_pmu(void) | ||
59 | { | ||
60 | return test_and_set_bit_lock(0, &pmu_lock) ? ERR_PTR(-EBUSY) : | ||
61 | &pmu_irqs; | ||
62 | } | ||
63 | EXPORT_SYMBOL_GPL(reserve_pmu); | ||
64 | |||
65 | int | ||
66 | release_pmu(const struct pmu_irqs *irqs) | ||
67 | { | ||
68 | if (WARN_ON(irqs != &pmu_irqs)) | ||
69 | return -EINVAL; | ||
70 | clear_bit_unlock(0, &pmu_lock); | ||
71 | return 0; | ||
72 | } | ||
73 | EXPORT_SYMBOL_GPL(release_pmu); | ||
74 | |||
75 | static int | ||
76 | set_irq_affinity(int irq, | ||
77 | unsigned int cpu) | ||
78 | { | ||
79 | #ifdef CONFIG_SMP | ||
80 | int err = irq_set_affinity(irq, cpumask_of(cpu)); | ||
81 | if (err) | ||
82 | pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n", | ||
83 | irq, cpu); | ||
84 | return err; | ||
85 | #else | ||
86 | return 0; | ||
87 | #endif | ||
88 | } | ||
89 | |||
90 | int | ||
91 | init_pmu(void) | ||
92 | { | ||
93 | int i, err = 0; | ||
94 | |||
95 | for (i = 0; i < pmu_irqs.num_irqs; ++i) { | ||
96 | err = set_irq_affinity(pmu_irqs.irqs[i], i); | ||
97 | if (err) | ||
98 | break; | ||
99 | } | ||
100 | |||
101 | return err; | ||
102 | } | ||
103 | EXPORT_SYMBOL_GPL(init_pmu); | ||
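A minimal sketch of the intended calling pattern for this reservation API (a hypothetical consumer; in-tree users would be subsystems such as perf or OProfile that must not drive the PMU concurrently):

	#include <linux/err.h>
	#include <asm/pmu.h>

	static int example_claim_pmu(void)
	{
		const struct pmu_irqs *irqs = reserve_pmu();	/* exclusive claim */
		int err;

		if (IS_ERR(irqs))
			return PTR_ERR(irqs);	/* -EBUSY: PMU already owned */

		err = init_pmu();	/* route each IRQ to its CPU */
		if (err)
			release_pmu(irqs);
		return err;
	}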
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 0d96d0171c05..acf5e6fdb6dc 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c | |||
@@ -16,7 +16,6 @@ | |||
16 | #include <linux/mm.h> | 16 | #include <linux/mm.h> |
17 | #include <linux/stddef.h> | 17 | #include <linux/stddef.h> |
18 | #include <linux/unistd.h> | 18 | #include <linux/unistd.h> |
19 | #include <linux/slab.h> | ||
20 | #include <linux/user.h> | 19 | #include <linux/user.h> |
21 | #include <linux/delay.h> | 20 | #include <linux/delay.h> |
22 | #include <linux/reboot.h> | 21 | #include <linux/reboot.h> |
@@ -212,7 +211,8 @@ void __show_regs(struct pt_regs *regs) | |||
212 | char buf[64]; | 211 | char buf[64]; |
213 | 212 | ||
214 | printk("CPU: %d %s (%s %.*s)\n", | 213 | printk("CPU: %d %s (%s %.*s)\n", |
215 | smp_processor_id(), print_tainted(), init_utsname()->release, | 214 | raw_smp_processor_id(), print_tainted(), |
215 | init_utsname()->release, | ||
216 | (int)strcspn(init_utsname()->version, " "), | 216 | (int)strcspn(init_utsname()->version, " "), |
217 | init_utsname()->version); | 217 | init_utsname()->version); |
218 | print_symbol("PC is at %s\n", instruction_pointer(regs)); | 218 | print_symbol("PC is at %s\n", instruction_pointer(regs)); |
@@ -274,17 +274,18 @@ void show_regs(struct pt_regs * regs) | |||
274 | __backtrace(); | 274 | __backtrace(); |
275 | } | 275 | } |
276 | 276 | ||
277 | ATOMIC_NOTIFIER_HEAD(thread_notify_head); | ||
278 | |||
279 | EXPORT_SYMBOL_GPL(thread_notify_head); | ||
280 | |||
277 | /* | 281 | /* |
278 | * Free current thread data structures etc.. | 282 | * Free current thread data structures etc.. |
279 | */ | 283 | */ |
280 | void exit_thread(void) | 284 | void exit_thread(void) |
281 | { | 285 | { |
286 | thread_notify(THREAD_NOTIFY_EXIT, current_thread_info()); | ||
282 | } | 287 | } |
283 | 288 | ||
284 | ATOMIC_NOTIFIER_HEAD(thread_notify_head); | ||
285 | |||
286 | EXPORT_SYMBOL_GPL(thread_notify_head); | ||
287 | |||
288 | void flush_thread(void) | 289 | void flush_thread(void) |
289 | { | 290 | { |
290 | struct thread_info *thread = current_thread_info(); | 291 | struct thread_info *thread = current_thread_info(); |
@@ -299,9 +300,6 @@ void flush_thread(void) | |||
299 | 300 | ||
300 | void release_thread(struct task_struct *dead_task) | 301 | void release_thread(struct task_struct *dead_task) |
301 | { | 302 | { |
302 | struct thread_info *thread = task_thread_info(dead_task); | ||
303 | |||
304 | thread_notify(THREAD_NOTIFY_RELEASE, thread); | ||
305 | } | 303 | } |
306 | 304 | ||
307 | asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); | 305 | asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); |
@@ -357,7 +355,7 @@ EXPORT_SYMBOL(dump_fpu); | |||
357 | * the thread function, and r3 points to the exit function. | 355 | * the thread function, and r3 points to the exit function. |
358 | */ | 356 | */ |
359 | extern void kernel_thread_helper(void); | 357 | extern void kernel_thread_helper(void); |
360 | asm( ".section .text\n" | 358 | asm( ".pushsection .text\n" |
361 | " .align\n" | 359 | " .align\n" |
362 | " .type kernel_thread_helper, #function\n" | 360 | " .type kernel_thread_helper, #function\n" |
363 | "kernel_thread_helper:\n" | 361 | "kernel_thread_helper:\n" |
@@ -365,11 +363,11 @@ asm( ".section .text\n" | |||
365 | " mov lr, r3\n" | 363 | " mov lr, r3\n" |
366 | " mov pc, r2\n" | 364 | " mov pc, r2\n" |
367 | " .size kernel_thread_helper, . - kernel_thread_helper\n" | 365 | " .size kernel_thread_helper, . - kernel_thread_helper\n" |
368 | " .previous"); | 366 | " .popsection"); |
369 | 367 | ||
370 | #ifdef CONFIG_ARM_UNWIND | 368 | #ifdef CONFIG_ARM_UNWIND |
371 | extern void kernel_thread_exit(long code); | 369 | extern void kernel_thread_exit(long code); |
372 | asm( ".section .text\n" | 370 | asm( ".pushsection .text\n" |
373 | " .align\n" | 371 | " .align\n" |
374 | " .type kernel_thread_exit, #function\n" | 372 | " .type kernel_thread_exit, #function\n" |
375 | "kernel_thread_exit:\n" | 373 | "kernel_thread_exit:\n" |
@@ -379,7 +377,7 @@ asm( ".section .text\n" | |||
379 | " nop\n" | 377 | " nop\n" |
380 | " .fnend\n" | 378 | " .fnend\n" |
381 | " .size kernel_thread_exit, . - kernel_thread_exit\n" | 379 | " .size kernel_thread_exit, . - kernel_thread_exit\n" |
382 | " .previous"); | 380 | " .popsection"); |
383 | #else | 381 | #else |
384 | #define kernel_thread_exit do_exit | 382 | #define kernel_thread_exit do_exit |
385 | #endif | 383 | #endif |
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index a2ea3854cb3c..3f562a7c0a99 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c | |||
@@ -452,12 +452,23 @@ void ptrace_cancel_bpt(struct task_struct *child) | |||
452 | clear_breakpoint(child, &child->thread.debug.bp[i]); | 452 | clear_breakpoint(child, &child->thread.debug.bp[i]); |
453 | } | 453 | } |
454 | 454 | ||
455 | void user_disable_single_step(struct task_struct *task) | ||
456 | { | ||
457 | task->ptrace &= ~PT_SINGLESTEP; | ||
458 | ptrace_cancel_bpt(task); | ||
459 | } | ||
460 | |||
461 | void user_enable_single_step(struct task_struct *task) | ||
462 | { | ||
463 | task->ptrace |= PT_SINGLESTEP; | ||
464 | } | ||
465 | |||
455 | /* | 466 | /* |
456 | * Called by kernel/ptrace.c when detaching.. | 467 | * Called by kernel/ptrace.c when detaching.. |
457 | */ | 468 | */ |
458 | void ptrace_disable(struct task_struct *child) | 469 | void ptrace_disable(struct task_struct *child) |
459 | { | 470 | { |
460 | single_step_disable(child); | 471 | user_disable_single_step(child); |
461 | } | 472 | } |
462 | 473 | ||
463 | /* | 474 | /* |
@@ -499,10 +510,41 @@ static struct undef_hook thumb_break_hook = { | |||
499 | .fn = break_trap, | 510 | .fn = break_trap, |
500 | }; | 511 | }; |
501 | 512 | ||
513 | static int thumb2_break_trap(struct pt_regs *regs, unsigned int instr) | ||
514 | { | ||
515 | unsigned int instr2; | ||
516 | void __user *pc; | ||
517 | |||
518 | /* Check the second half of the instruction. */ | ||
519 | pc = (void __user *)(instruction_pointer(regs) + 2); | ||
520 | |||
521 | if (processor_mode(regs) == SVC_MODE) { | ||
522 | instr2 = *(u16 *) pc; | ||
523 | } else { | ||
524 | get_user(instr2, (u16 __user *)pc); | ||
525 | } | ||
526 | |||
527 | if (instr2 == 0xa000) { | ||
528 | ptrace_break(current, regs); | ||
529 | return 0; | ||
530 | } else { | ||
531 | return 1; | ||
532 | } | ||
533 | } | ||
534 | |||
535 | static struct undef_hook thumb2_break_hook = { | ||
536 | .instr_mask = 0xffff, | ||
537 | .instr_val = 0xf7f0, | ||
538 | .cpsr_mask = PSR_T_BIT, | ||
539 | .cpsr_val = PSR_T_BIT, | ||
540 | .fn = thumb2_break_trap, | ||
541 | }; | ||
542 | |||
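Reading the two hooks together: the undef hook matches the first Thumb-2 halfword 0xf7f0 in Thumb state, and thumb2_break_trap() then requires the trailing halfword 0xa000, so the 32-bit breakpoint instruction is the halfword pair below (shown as a hypothetical debugger-side constant derived from those checks):

	/* Thumb-2 breakpoint recognised above: 0xf7f0 followed by 0xa000 */
	static const unsigned short thumb2_bkpt[2] = { 0xf7f0, 0xa000 };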
502 | static int __init ptrace_break_init(void) | 543 | static int __init ptrace_break_init(void) |
503 | { | 544 | { |
504 | register_undef_hook(&arm_break_hook); | 545 | register_undef_hook(&arm_break_hook); |
505 | register_undef_hook(&thumb_break_hook); | 546 | register_undef_hook(&thumb_break_hook); |
547 | register_undef_hook(&thumb2_break_hook); | ||
506 | return 0; | 548 | return 0; |
507 | } | 549 | } |
508 | 550 | ||
@@ -669,7 +711,7 @@ static int ptrace_getvfpregs(struct task_struct *tsk, void __user *data) | |||
669 | union vfp_state *vfp = &thread->vfpstate; | 711 | union vfp_state *vfp = &thread->vfpstate; |
670 | struct user_vfp __user *ufp = data; | 712 | struct user_vfp __user *ufp = data; |
671 | 713 | ||
672 | vfp_sync_state(thread); | 714 | vfp_sync_hwstate(thread); |
673 | 715 | ||
674 | /* copy the floating point registers */ | 716 | /* copy the floating point registers */ |
675 | if (copy_to_user(&ufp->fpregs, &vfp->hard.fpregs, | 717 | if (copy_to_user(&ufp->fpregs, &vfp->hard.fpregs, |
@@ -692,7 +734,7 @@ static int ptrace_setvfpregs(struct task_struct *tsk, void __user *data) | |||
692 | union vfp_state *vfp = &thread->vfpstate; | 734 | union vfp_state *vfp = &thread->vfpstate; |
693 | struct user_vfp __user *ufp = data; | 735 | struct user_vfp __user *ufp = data; |
694 | 736 | ||
695 | vfp_sync_state(thread); | 737 | vfp_sync_hwstate(thread); |
696 | 738 | ||
697 | /* copy the floating point registers */ | 739 | /* copy the floating point registers */ |
698 | if (copy_from_user(&vfp->hard.fpregs, &ufp->fpregs, | 740 | if (copy_from_user(&vfp->hard.fpregs, &ufp->fpregs, |
@@ -703,6 +745,8 @@ static int ptrace_setvfpregs(struct task_struct *tsk, void __user *data) | |||
703 | if (get_user(vfp->hard.fpscr, &ufp->fpscr)) | 745 | if (get_user(vfp->hard.fpscr, &ufp->fpscr)) |
704 | return -EFAULT; | 746 | return -EFAULT; |
705 | 747 | ||
748 | vfp_flush_hwstate(thread); | ||
749 | |||
706 | return 0; | 750 | return 0; |
707 | } | 751 | } |
708 | #endif | 752 | #endif |
@@ -712,77 +756,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
712 | int ret; | 756 | int ret; |
713 | 757 | ||
714 | switch (request) { | 758 | switch (request) { |
715 | /* | ||
716 | * read word at location "addr" in the child process. | ||
717 | */ | ||
718 | case PTRACE_PEEKTEXT: | ||
719 | case PTRACE_PEEKDATA: | ||
720 | ret = generic_ptrace_peekdata(child, addr, data); | ||
721 | break; | ||
722 | |||
723 | case PTRACE_PEEKUSR: | 759 | case PTRACE_PEEKUSR: |
724 | ret = ptrace_read_user(child, addr, (unsigned long __user *)data); | 760 | ret = ptrace_read_user(child, addr, (unsigned long __user *)data); |
725 | break; | 761 | break; |
726 | 762 | ||
727 | /* | ||
728 | * write the word at location addr. | ||
729 | */ | ||
730 | case PTRACE_POKETEXT: | ||
731 | case PTRACE_POKEDATA: | ||
732 | ret = generic_ptrace_pokedata(child, addr, data); | ||
733 | break; | ||
734 | |||
735 | case PTRACE_POKEUSR: | 763 | case PTRACE_POKEUSR: |
736 | ret = ptrace_write_user(child, addr, data); | 764 | ret = ptrace_write_user(child, addr, data); |
737 | break; | 765 | break; |
738 | 766 | ||
739 | /* | ||
740 | * continue/restart and stop at next (return from) syscall | ||
741 | */ | ||
742 | case PTRACE_SYSCALL: | ||
743 | case PTRACE_CONT: | ||
744 | ret = -EIO; | ||
745 | if (!valid_signal(data)) | ||
746 | break; | ||
747 | if (request == PTRACE_SYSCALL) | ||
748 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | ||
749 | else | ||
750 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | ||
751 | child->exit_code = data; | ||
752 | single_step_disable(child); | ||
753 | wake_up_process(child); | ||
754 | ret = 0; | ||
755 | break; | ||
756 | |||
757 | /* | ||
758 | * make the child exit. Best I can do is send it a sigkill. | ||
759 | * perhaps it should be put in the status that it wants to | ||
760 | * exit. | ||
761 | */ | ||
762 | case PTRACE_KILL: | ||
763 | single_step_disable(child); | ||
764 | if (child->exit_state != EXIT_ZOMBIE) { | ||
765 | child->exit_code = SIGKILL; | ||
766 | wake_up_process(child); | ||
767 | } | ||
768 | ret = 0; | ||
769 | break; | ||
770 | |||
771 | /* | ||
772 | * execute single instruction. | ||
773 | */ | ||
774 | case PTRACE_SINGLESTEP: | ||
775 | ret = -EIO; | ||
776 | if (!valid_signal(data)) | ||
777 | break; | ||
778 | single_step_enable(child); | ||
779 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | ||
780 | child->exit_code = data; | ||
781 | /* give it a chance to run. */ | ||
782 | wake_up_process(child); | ||
783 | ret = 0; | ||
784 | break; | ||
785 | |||
786 | case PTRACE_GETREGS: | 767 | case PTRACE_GETREGS: |
787 | ret = ptrace_getregs(child, (void __user *)data); | 768 | ret = ptrace_getregs(child, (void __user *)data); |
788 | break; | 769 | break; |
diff --git a/arch/arm/kernel/ptrace.h b/arch/arm/kernel/ptrace.h index def3b6184a79..3926605b82ea 100644 --- a/arch/arm/kernel/ptrace.h +++ b/arch/arm/kernel/ptrace.h | |||
@@ -14,20 +14,6 @@ extern void ptrace_set_bpt(struct task_struct *); | |||
14 | extern void ptrace_break(struct task_struct *, struct pt_regs *); | 14 | extern void ptrace_break(struct task_struct *, struct pt_regs *); |
15 | 15 | ||
16 | /* | 16 | /* |
17 | * make sure single-step breakpoint is gone. | ||
18 | */ | ||
19 | static inline void single_step_disable(struct task_struct *task) | ||
20 | { | ||
21 | task->ptrace &= ~PT_SINGLESTEP; | ||
22 | ptrace_cancel_bpt(task); | ||
23 | } | ||
24 | |||
25 | static inline void single_step_enable(struct task_struct *task) | ||
26 | { | ||
27 | task->ptrace |= PT_SINGLESTEP; | ||
28 | } | ||
29 | |||
30 | /* | ||
31 | * Send SIGTRAP if we're single-stepping | 17 | * Send SIGTRAP if we're single-stepping |
32 | */ | 18 | */ |
33 | static inline void single_step_trap(struct task_struct *task) | 19 | static inline void single_step_trap(struct task_struct *task) |
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index c6c57b640b6b..c91c77b54dea 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/interrupt.h> | 24 | #include <linux/interrupt.h> |
25 | #include <linux/smp.h> | 25 | #include <linux/smp.h> |
26 | #include <linux/fs.h> | 26 | #include <linux/fs.h> |
27 | #include <linux/proc_fs.h> | ||
27 | 28 | ||
28 | #include <asm/unified.h> | 29 | #include <asm/unified.h> |
29 | #include <asm/cpu.h> | 30 | #include <asm/cpu.h> |
@@ -102,6 +103,7 @@ struct cpu_cache_fns cpu_cache; | |||
102 | #endif | 103 | #endif |
103 | #ifdef CONFIG_OUTER_CACHE | 104 | #ifdef CONFIG_OUTER_CACHE |
104 | struct outer_cache_fns outer_cache; | 105 | struct outer_cache_fns outer_cache; |
106 | EXPORT_SYMBOL(outer_cache); | ||
105 | #endif | 107 | #endif |
106 | 108 | ||
107 | struct stack { | 109 | struct stack { |
@@ -117,7 +119,7 @@ EXPORT_SYMBOL(elf_platform); | |||
117 | 119 | ||
118 | static const char *cpu_name; | 120 | static const char *cpu_name; |
119 | static const char *machine_name; | 121 | static const char *machine_name; |
120 | static char __initdata command_line[COMMAND_LINE_SIZE]; | 122 | static char __initdata cmd_line[COMMAND_LINE_SIZE]; |
121 | 123 | ||
122 | static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE; | 124 | static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE; |
123 | static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } }; | 125 | static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } }; |
@@ -417,10 +419,11 @@ static int __init arm_add_memory(unsigned long start, unsigned long size) | |||
417 | * Pick out the memory size. We look for mem=size@start, | 419 | * Pick out the memory size. We look for mem=size@start, |
418 | * where start and size are "size[KkMm]" | 420 | * where start and size are "size[KkMm]" |
419 | */ | 421 | */ |
420 | static void __init early_mem(char **p) | 422 | static int __init early_mem(char *p) |
421 | { | 423 | { |
422 | static int usermem __initdata = 0; | 424 | static int usermem __initdata = 0; |
423 | unsigned long size, start; | 425 | unsigned long size, start; |
426 | char *endp; | ||
424 | 427 | ||
425 | /* | 428 | /* |
426 | * If the user specifies memory size, we | 429 | * If the user specifies memory size, we |
@@ -433,52 +436,15 @@ static void __init early_mem(char **p) | |||
433 | } | 436 | } |
434 | 437 | ||
435 | start = PHYS_OFFSET; | 438 | start = PHYS_OFFSET; |
436 | size = memparse(*p, p); | 439 | size = memparse(p, &endp); |
437 | if (**p == '@') | 440 | if (*endp == '@') |
438 | start = memparse(*p + 1, p); | 441 | start = memparse(endp + 1, NULL); |
439 | 442 | ||
440 | arm_add_memory(start, size); | 443 | arm_add_memory(start, size); |
441 | } | ||
442 | __early_param("mem=", early_mem); | ||
443 | 444 | ||
444 | /* | 445 | return 0; |
445 | * Initial parsing of the command line. | ||
446 | */ | ||
447 | static void __init parse_cmdline(char **cmdline_p, char *from) | ||
448 | { | ||
449 | char c = ' ', *to = command_line; | ||
450 | int len = 0; | ||
451 | |||
452 | for (;;) { | ||
453 | if (c == ' ') { | ||
454 | extern struct early_params __early_begin, __early_end; | ||
455 | struct early_params *p; | ||
456 | |||
457 | for (p = &__early_begin; p < &__early_end; p++) { | ||
458 | int arglen = strlen(p->arg); | ||
459 | |||
460 | if (memcmp(from, p->arg, arglen) == 0) { | ||
461 | if (to != command_line) | ||
462 | to -= 1; | ||
463 | from += arglen; | ||
464 | p->fn(&from); | ||
465 | |||
466 | while (*from != ' ' && *from != '\0') | ||
467 | from++; | ||
468 | break; | ||
469 | } | ||
470 | } | ||
471 | } | ||
472 | c = *from++; | ||
473 | if (!c) | ||
474 | break; | ||
475 | if (COMMAND_LINE_SIZE <= ++len) | ||
476 | break; | ||
477 | *to++ = c; | ||
478 | } | ||
479 | *to = '\0'; | ||
480 | *cmdline_p = command_line; | ||
481 | } | 446 | } |
447 | early_param("mem", early_mem); | ||
482 | 448 | ||
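A worked example of the new early_param path (the command-line fragment is hypothetical): for mem=64M@0x20000000, memparse() consumes the size and leaves endp on the '@':

	/* p == "64M@0x20000000" */
	size = memparse(p, &endp);		/* size == 64 << 20, endp -> "@0x20000000" */
	if (*endp == '@')
		start = memparse(endp + 1, NULL);	/* start == 0x20000000 */
	arm_add_memory(start, size);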
483 | static void __init | 449 | static void __init |
484 | setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz) | 450 | setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz) |
@@ -739,9 +705,15 @@ void __init setup_arch(char **cmdline_p) | |||
739 | init_mm.end_data = (unsigned long) _edata; | 705 | init_mm.end_data = (unsigned long) _edata; |
740 | init_mm.brk = (unsigned long) _end; | 706 | init_mm.brk = (unsigned long) _end; |
741 | 707 | ||
742 | memcpy(boot_command_line, from, COMMAND_LINE_SIZE); | 708 | /* parse_early_param needs a boot_command_line */ |
743 | boot_command_line[COMMAND_LINE_SIZE-1] = '\0'; | 709 | strlcpy(boot_command_line, from, COMMAND_LINE_SIZE); |
744 | parse_cmdline(cmdline_p, from); | 710 | |
711 | /* populate cmd_line too for later use, preserving boot_command_line */ | ||
712 | strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE); | ||
713 | *cmdline_p = cmd_line; | ||
714 | |||
715 | parse_early_param(); | ||
716 | |||
745 | paging_init(mdesc); | 717 | paging_init(mdesc); |
746 | request_standard_resources(&meminfo, mdesc); | 718 | request_standard_resources(&meminfo, mdesc); |
747 | 719 | ||
@@ -782,9 +754,21 @@ static int __init topology_init(void) | |||
782 | 754 | ||
783 | return 0; | 755 | return 0; |
784 | } | 756 | } |
785 | |||
786 | subsys_initcall(topology_init); | 757 | subsys_initcall(topology_init); |
787 | 758 | ||
759 | #ifdef CONFIG_HAVE_PROC_CPU | ||
760 | static int __init proc_cpu_init(void) | ||
761 | { | ||
762 | struct proc_dir_entry *res; | ||
763 | |||
764 | res = proc_mkdir("cpu", NULL); | ||
765 | if (!res) | ||
766 | return -ENOMEM; | ||
767 | return 0; | ||
768 | } | ||
769 | fs_initcall(proc_cpu_init); | ||
770 | #endif | ||
771 | |||
788 | static const char *hwcap_str[] = { | 772 | static const char *hwcap_str[] = { |
789 | "swp", | 773 | "swp", |
790 | "half", | 774 | "half", |
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index e7714f367eb8..907d5a620bca 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <asm/cacheflush.h> | 18 | #include <asm/cacheflush.h> |
19 | #include <asm/ucontext.h> | 19 | #include <asm/ucontext.h> |
20 | #include <asm/unistd.h> | 20 | #include <asm/unistd.h> |
21 | #include <asm/vfp.h> | ||
21 | 22 | ||
22 | #include "ptrace.h" | 23 | #include "ptrace.h" |
23 | #include "signal.h" | 24 | #include "signal.h" |
@@ -175,6 +176,90 @@ static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame) | |||
175 | 176 | ||
176 | #endif | 177 | #endif |
177 | 178 | ||
179 | #ifdef CONFIG_VFP | ||
180 | |||
181 | static int preserve_vfp_context(struct vfp_sigframe __user *frame) | ||
182 | { | ||
183 | struct thread_info *thread = current_thread_info(); | ||
184 | struct vfp_hard_struct *h = &thread->vfpstate.hard; | ||
185 | const unsigned long magic = VFP_MAGIC; | ||
186 | const unsigned long size = VFP_STORAGE_SIZE; | ||
187 | int err = 0; | ||
188 | |||
189 | vfp_sync_hwstate(thread); | ||
190 | __put_user_error(magic, &frame->magic, err); | ||
191 | __put_user_error(size, &frame->size, err); | ||
192 | |||
193 | /* | ||
194 | * Copy the floating point registers. There can be unused | ||
195 | * registers; see asm/hwcap.h for details. | ||
196 | */ | ||
197 | err |= __copy_to_user(&frame->ufp.fpregs, &h->fpregs, | ||
198 | sizeof(h->fpregs)); | ||
199 | /* | ||
200 | * Copy the status and control register. | ||
201 | */ | ||
202 | __put_user_error(h->fpscr, &frame->ufp.fpscr, err); | ||
203 | |||
204 | /* | ||
205 | * Copy the exception registers. | ||
206 | */ | ||
207 | __put_user_error(h->fpexc, &frame->ufp_exc.fpexc, err); | ||
208 | __put_user_error(h->fpinst, &frame->ufp_exc.fpinst, err); | ||
209 | __put_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err); | ||
210 | |||
211 | return err ? -EFAULT : 0; | ||
212 | } | ||
213 | |||
214 | static int restore_vfp_context(struct vfp_sigframe __user *frame) | ||
215 | { | ||
216 | struct thread_info *thread = current_thread_info(); | ||
217 | struct vfp_hard_struct *h = &thread->vfpstate.hard; | ||
218 | unsigned long magic; | ||
219 | unsigned long size; | ||
220 | unsigned long fpexc; | ||
221 | int err = 0; | ||
222 | |||
223 | __get_user_error(magic, &frame->magic, err); | ||
224 | __get_user_error(size, &frame->size, err); | ||
225 | |||
226 | if (err) | ||
227 | return -EFAULT; | ||
228 | if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE) | ||
229 | return -EINVAL; | ||
230 | |||
231 | /* | ||
232 | * Copy the floating point registers. There can be unused | ||
233 | * registers; see asm/hwcap.h for details. | ||
234 | */ | ||
235 | err |= __copy_from_user(&h->fpregs, &frame->ufp.fpregs, | ||
236 | sizeof(h->fpregs)); | ||
237 | /* | ||
238 | * Copy the status and control register. | ||
239 | */ | ||
240 | __get_user_error(h->fpscr, &frame->ufp.fpscr, err); | ||
241 | |||
242 | /* | ||
243 | * Sanitise and restore the exception registers. | ||
244 | */ | ||
245 | __get_user_error(fpexc, &frame->ufp_exc.fpexc, err); | ||
246 | /* Ensure the VFP is enabled. */ | ||
247 | fpexc |= FPEXC_EN; | ||
248 | /* Ensure FPINST2 is invalid and the exception flag is cleared. */ | ||
249 | fpexc &= ~(FPEXC_EX | FPEXC_FP2V); | ||
250 | h->fpexc = fpexc; | ||
251 | |||
252 | __get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err); | ||
253 | __get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err); | ||
254 | |||
255 | if (!err) | ||
256 | vfp_flush_hwstate(thread); | ||
257 | |||
258 | return err ? -EFAULT : 0; | ||
259 | } | ||
260 | |||
261 | #endif | ||
262 | |||
178 | /* | 263 | /* |
179 | * Do a signal return; undo the signal stack. These are aligned to 64-bit. | 264 | * Do a signal return; undo the signal stack. These are aligned to 64-bit. |
180 | */ | 265 | */ |
@@ -233,8 +318,8 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf) | |||
233 | err |= restore_iwmmxt_context(&aux->iwmmxt); | 318 | err |= restore_iwmmxt_context(&aux->iwmmxt); |
234 | #endif | 319 | #endif |
235 | #ifdef CONFIG_VFP | 320 | #ifdef CONFIG_VFP |
236 | // if (err == 0) | 321 | if (err == 0) |
237 | // err |= vfp_restore_state(&sf->aux.vfp); | 322 | err |= restore_vfp_context(&aux->vfp); |
238 | #endif | 323 | #endif |
239 | 324 | ||
240 | return err; | 325 | return err; |
@@ -348,8 +433,8 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set) | |||
348 | err |= preserve_iwmmxt_context(&aux->iwmmxt); | 433 | err |= preserve_iwmmxt_context(&aux->iwmmxt); |
349 | #endif | 434 | #endif |
350 | #ifdef CONFIG_VFP | 435 | #ifdef CONFIG_VFP |
351 | // if (err == 0) | 436 | if (err == 0) |
352 | // err |= vfp_save_state(&sf->aux.vfp); | 437 | err |= preserve_vfp_context(&aux->vfp); |
353 | #endif | 438 | #endif |
354 | __put_user_error(0, &aux->end_magic, err); | 439 | __put_user_error(0, &aux->end_magic, err); |
355 | 440 | ||
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 57162af53dc9..a01194e583ff 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c | |||
@@ -86,6 +86,12 @@ int __cpuinit __cpu_up(unsigned int cpu) | |||
86 | return PTR_ERR(idle); | 86 | return PTR_ERR(idle); |
87 | } | 87 | } |
88 | ci->idle = idle; | 88 | ci->idle = idle; |
89 | } else { | ||
90 | /* | ||
91 | * Since this idle thread is being re-used, call | ||
92 | * init_idle() to reinitialize the thread structure. | ||
93 | */ | ||
94 | init_idle(idle, cpu); | ||
89 | } | 95 | } |
90 | 96 | ||
91 | /* | 97 | /* |
@@ -99,6 +105,7 @@ int __cpuinit __cpu_up(unsigned int cpu) | |||
99 | *pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) | | 105 | *pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) | |
100 | PMD_TYPE_SECT | PMD_SECT_AP_WRITE); | 106 | PMD_TYPE_SECT | PMD_SECT_AP_WRITE); |
101 | flush_pmd_entry(pmd); | 107 | flush_pmd_entry(pmd); |
108 | outer_clean_range(__pa(pmd), __pa(pmd + 1)); | ||
102 | 109 | ||
103 | /* | 110 | /* |
104 | * We need to tell the secondary core where to find | 111 | * We need to tell the secondary core where to find |
@@ -106,7 +113,8 @@ int __cpuinit __cpu_up(unsigned int cpu) | |||
106 | */ | 113 | */ |
107 | secondary_data.stack = task_stack_page(idle) + THREAD_START_SP; | 114 | secondary_data.stack = task_stack_page(idle) + THREAD_START_SP; |
108 | secondary_data.pgdir = virt_to_phys(pgd); | 115 | secondary_data.pgdir = virt_to_phys(pgd); |
109 | wmb(); | 116 | __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data)); |
117 | outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1)); | ||
110 | 118 | ||
111 | /* | 119 | /* |
112 | * Now bring the CPU into our world. | 120 | * Now bring the CPU into our world. |
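The replacement of wmb() above is worth spelling out: a barrier only orders this core's accesses, but the secondary core comes out of reset with its MMU and caches off and reads secondary_data straight from DRAM, so the data must be cleaned out of both cache levels. An annotated restatement of the two calls (no new API):

	/* make secondary_data visible to a core booting with caches disabled */
	__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));	/* clean L1 to memory */
	outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));	/* clean the outer (L2) cache */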
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c index a73a34dccf2a..7c5f0c024db7 100644 --- a/arch/arm/kernel/smp_twd.c +++ b/arch/arm/kernel/smp_twd.c | |||
@@ -21,23 +21,6 @@ | |||
21 | #include <asm/smp_twd.h> | 21 | #include <asm/smp_twd.h> |
22 | #include <asm/hardware/gic.h> | 22 | #include <asm/hardware/gic.h> |
23 | 23 | ||
24 | #define TWD_TIMER_LOAD 0x00 | ||
25 | #define TWD_TIMER_COUNTER 0x04 | ||
26 | #define TWD_TIMER_CONTROL 0x08 | ||
27 | #define TWD_TIMER_INTSTAT 0x0C | ||
28 | |||
29 | #define TWD_WDOG_LOAD 0x20 | ||
30 | #define TWD_WDOG_COUNTER 0x24 | ||
31 | #define TWD_WDOG_CONTROL 0x28 | ||
32 | #define TWD_WDOG_INTSTAT 0x2C | ||
33 | #define TWD_WDOG_RESETSTAT 0x30 | ||
34 | #define TWD_WDOG_DISABLE 0x34 | ||
35 | |||
36 | #define TWD_TIMER_CONTROL_ENABLE (1 << 0) | ||
37 | #define TWD_TIMER_CONTROL_ONESHOT (0 << 1) | ||
38 | #define TWD_TIMER_CONTROL_PERIODIC (1 << 1) | ||
39 | #define TWD_TIMER_CONTROL_IT_ENABLE (1 << 2) | ||
40 | |||
41 | /* set up by the platform code */ | 24 | /* set up by the platform code */ |
42 | void __iomem *twd_base; | 25 | void __iomem *twd_base; |
43 | 26 | ||
@@ -160,6 +143,7 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk) | |||
160 | 143 | ||
161 | /* Make sure our local interrupt controller has this enabled */ | 144 | /* Make sure our local interrupt controller has this enabled */ |
162 | local_irq_save(flags); | 145 | local_irq_save(flags); |
146 | irq_to_desc(clk->irq)->status |= IRQ_NOPROBE; | ||
163 | get_irq_chip(clk->irq)->unmask(clk->irq); | 147 | get_irq_chip(clk->irq)->unmask(clk->irq); |
164 | local_irq_restore(flags); | 148 | local_irq_restore(flags); |
165 | 149 | ||
diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c index 78ecaac65206..c23501842b98 100644 --- a/arch/arm/kernel/sys_arm.c +++ b/arch/arm/kernel/sys_arm.c | |||
@@ -15,7 +15,6 @@ | |||
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/errno.h> | 16 | #include <linux/errno.h> |
17 | #include <linux/sched.h> | 17 | #include <linux/sched.h> |
18 | #include <linux/slab.h> | ||
19 | #include <linux/mm.h> | 18 | #include <linux/mm.h> |
20 | #include <linux/sem.h> | 19 | #include <linux/sem.h> |
21 | #include <linux/msg.h> | 20 | #include <linux/msg.h> |
@@ -27,188 +26,7 @@ | |||
27 | #include <linux/file.h> | 26 | #include <linux/file.h> |
28 | #include <linux/ipc.h> | 27 | #include <linux/ipc.h> |
29 | #include <linux/uaccess.h> | 28 | #include <linux/uaccess.h> |
30 | 29 | #include <linux/slab.h> | |
31 | extern unsigned long do_mremap(unsigned long addr, unsigned long old_len, | ||
32 | unsigned long new_len, unsigned long flags, | ||
33 | unsigned long new_addr); | ||
34 | |||
35 | /* common code for old and new mmaps */ | ||
36 | inline long do_mmap2( | ||
37 | unsigned long addr, unsigned long len, | ||
38 | unsigned long prot, unsigned long flags, | ||
39 | unsigned long fd, unsigned long pgoff) | ||
40 | { | ||
41 | int error = -EINVAL; | ||
42 | struct file * file = NULL; | ||
43 | |||
44 | flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
45 | |||
46 | if (flags & MAP_FIXED && addr < FIRST_USER_ADDRESS) | ||
47 | goto out; | ||
48 | |||
49 | error = -EBADF; | ||
50 | if (!(flags & MAP_ANONYMOUS)) { | ||
51 | file = fget(fd); | ||
52 | if (!file) | ||
53 | goto out; | ||
54 | } | ||
55 | |||
56 | down_write(¤t->mm->mmap_sem); | ||
57 | error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); | ||
58 | up_write(¤t->mm->mmap_sem); | ||
59 | |||
60 | if (file) | ||
61 | fput(file); | ||
62 | out: | ||
63 | return error; | ||
64 | } | ||
65 | |||
66 | struct mmap_arg_struct { | ||
67 | unsigned long addr; | ||
68 | unsigned long len; | ||
69 | unsigned long prot; | ||
70 | unsigned long flags; | ||
71 | unsigned long fd; | ||
72 | unsigned long offset; | ||
73 | }; | ||
74 | |||
75 | asmlinkage int old_mmap(struct mmap_arg_struct __user *arg) | ||
76 | { | ||
77 | int error = -EFAULT; | ||
78 | struct mmap_arg_struct a; | ||
79 | |||
80 | if (copy_from_user(&a, arg, sizeof(a))) | ||
81 | goto out; | ||
82 | |||
83 | error = -EINVAL; | ||
84 | if (a.offset & ~PAGE_MASK) | ||
85 | goto out; | ||
86 | |||
87 | error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); | ||
88 | out: | ||
89 | return error; | ||
90 | } | ||
91 | |||
92 | asmlinkage unsigned long | ||
93 | sys_arm_mremap(unsigned long addr, unsigned long old_len, | ||
94 | unsigned long new_len, unsigned long flags, | ||
95 | unsigned long new_addr) | ||
96 | { | ||
97 | unsigned long ret = -EINVAL; | ||
98 | |||
99 | if (flags & MREMAP_FIXED && new_addr < FIRST_USER_ADDRESS) | ||
100 | goto out; | ||
101 | |||
102 | down_write(¤t->mm->mmap_sem); | ||
103 | ret = do_mremap(addr, old_len, new_len, flags, new_addr); | ||
104 | up_write(¤t->mm->mmap_sem); | ||
105 | |||
106 | out: | ||
107 | return ret; | ||
108 | } | ||
109 | |||
110 | /* | ||
111 | * Perform the select(nd, in, out, ex, tv) and mmap() system | ||
112 | * calls. | ||
113 | */ | ||
114 | |||
115 | struct sel_arg_struct { | ||
116 | unsigned long n; | ||
117 | fd_set __user *inp, *outp, *exp; | ||
118 | struct timeval __user *tvp; | ||
119 | }; | ||
120 | |||
121 | asmlinkage int old_select(struct sel_arg_struct __user *arg) | ||
122 | { | ||
123 | struct sel_arg_struct a; | ||
124 | |||
125 | if (copy_from_user(&a, arg, sizeof(a))) | ||
126 | return -EFAULT; | ||
127 | /* sys_select() does the appropriate kernel locking */ | ||
128 | return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp); | ||
129 | } | ||
130 | |||
131 | #if !defined(CONFIG_AEABI) || defined(CONFIG_OABI_COMPAT) | ||
132 | /* | ||
133 | * sys_ipc() is the de-multiplexer for the SysV IPC calls.. | ||
134 | * | ||
135 | * This is really horribly ugly. | ||
136 | */ | ||
137 | asmlinkage int sys_ipc(uint call, int first, int second, int third, | ||
138 | void __user *ptr, long fifth) | ||
139 | { | ||
140 | int version, ret; | ||
141 | |||
142 | version = call >> 16; /* hack for backward compatibility */ | ||
143 | call &= 0xffff; | ||
144 | |||
145 | switch (call) { | ||
146 | case SEMOP: | ||
147 | return sys_semtimedop (first, (struct sembuf __user *)ptr, second, NULL); | ||
148 | case SEMTIMEDOP: | ||
149 | return sys_semtimedop(first, (struct sembuf __user *)ptr, second, | ||
150 | (const struct timespec __user *)fifth); | ||
151 | |||
152 | case SEMGET: | ||
153 | return sys_semget (first, second, third); | ||
154 | case SEMCTL: { | ||
155 | union semun fourth; | ||
156 | if (!ptr) | ||
157 | return -EINVAL; | ||
158 | if (get_user(fourth.__pad, (void __user * __user *) ptr)) | ||
159 | return -EFAULT; | ||
160 | return sys_semctl (first, second, third, fourth); | ||
161 | } | ||
162 | |||
163 | case MSGSND: | ||
164 | return sys_msgsnd(first, (struct msgbuf __user *) ptr, | ||
165 | second, third); | ||
166 | case MSGRCV: | ||
167 | switch (version) { | ||
168 | case 0: { | ||
169 | struct ipc_kludge tmp; | ||
170 | if (!ptr) | ||
171 | return -EINVAL; | ||
172 | if (copy_from_user(&tmp,(struct ipc_kludge __user *)ptr, | ||
173 | sizeof (tmp))) | ||
174 | return -EFAULT; | ||
175 | return sys_msgrcv (first, tmp.msgp, second, | ||
176 | tmp.msgtyp, third); | ||
177 | } | ||
178 | default: | ||
179 | return sys_msgrcv (first, | ||
180 | (struct msgbuf __user *) ptr, | ||
181 | second, fifth, third); | ||
182 | } | ||
183 | case MSGGET: | ||
184 | return sys_msgget ((key_t) first, second); | ||
185 | case MSGCTL: | ||
186 | return sys_msgctl(first, second, (struct msqid_ds __user *)ptr); | ||
187 | |||
188 | case SHMAT: | ||
189 | switch (version) { | ||
190 | default: { | ||
191 | ulong raddr; | ||
192 | ret = do_shmat(first, (char __user *)ptr, second, &raddr); | ||
193 | if (ret) | ||
194 | return ret; | ||
195 | return put_user(raddr, (ulong __user *)third); | ||
196 | } | ||
197 | case 1: /* Of course, we don't support iBCS2! */ | ||
198 | return -EINVAL; | ||
199 | } | ||
200 | case SHMDT: | ||
201 | return sys_shmdt ((char __user *)ptr); | ||
202 | case SHMGET: | ||
203 | return sys_shmget (first, second, third); | ||
204 | case SHMCTL: | ||
205 | return sys_shmctl (first, second, | ||
206 | (struct shmid_ds __user *) ptr); | ||
207 | default: | ||
208 | return -ENOSYS; | ||
209 | } | ||
210 | } | ||
211 | #endif | ||
212 | 30 | ||
213 | /* Fork a new task - this creates a new program thread. | 31 | /* Fork a new task - this creates a new program thread. |
214 | * This is called indirectly via a small wrapper | 32 | * This is called indirectly via a small wrapper |
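The block removed above is the ARM copy of the legacy mmap/select/SysV IPC entry points; in this era they are replaced by generic implementations (ipc/syscall.c, mm/mmap.c, fs/select.c) that architectures opt into via the __ARCH_WANT_SYS_IPC, __ARCH_WANT_SYS_OLD_MMAP and __ARCH_WANT_SYS_OLD_SELECT macros, so the hand-rolled demultiplexer can go. For reference, the call word that sys_ipc() decodes packs the operation into the low bits and the compatibility version into the high bits; a minimal sketch (helper names here are illustrative, not from the tree):

    /* The low 16 bits select the operation (SEMOP, MSGRCV, ...);
     * the high 16 bits carry a libc "version", where version 0
     * selects the historical ipc_kludge layout for MSGRCV. */
    #define IPC_CALL(version, op)	(((version) << 16) | (op))

    static inline unsigned int ipc_op(unsigned int call)
    {
            return call & 0xffff;
    }

    static inline unsigned int ipc_version(unsigned int call)
    {
            return call >> 16;
    }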
diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
index d59a0cd537f0..33ff678e32f2 100644
--- a/arch/arm/kernel/sys_oabi-compat.c
+++ b/arch/arm/kernel/sys_oabi-compat.c
@@ -346,9 +346,6 @@ asmlinkage long sys_oabi_semop(int semid, struct oabi_sembuf __user *tsops, | |||
346 | return sys_oabi_semtimedop(semid, tsops, nsops, NULL); | 346 | return sys_oabi_semtimedop(semid, tsops, nsops, NULL); |
347 | } | 347 | } |
348 | 348 | ||
349 | extern asmlinkage int sys_ipc(uint call, int first, int second, int third, | ||
350 | void __user *ptr, long fifth); | ||
351 | |||
352 | asmlinkage int sys_oabi_ipc(uint call, int first, int second, int third, | 349 | asmlinkage int sys_oabi_ipc(uint call, int first, int second, int third, |
353 | void __user *ptr, long fifth) | 350 | void __user *ptr, long fifth) |
354 | { | 351 | { |
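The extern goes away because sys_ipc() now has a proper prototype in <linux/syscalls.h> next to the other syscalls; quoting the generic declaration from memory, so treat the exact types as approximate:

    asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second,
                            unsigned long third, void __user *ptr, long fifth);

The OABI shim itself is unchanged apart from losing the duplicate declaration.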
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index d38cdf2c8276..28753805d2d1 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -10,11 +10,6 @@ | |||
10 | * | 10 | * |
11 | * This file contains the ARM-specific time handling details: | 11 | * This file contains the ARM-specific time handling details: |
12 | * reading the RTC at bootup, etc... | 12 | * reading the RTC at bootup, etc... |
13 | * | ||
14 | * 1994-07-02 Alan Modra | ||
15 | * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime | ||
16 | * 1998-12-20 Updated NTP code according to technical memorandum Jan '96 | ||
17 | * "A Kernel Model for Precision Timekeeping" by Dave Mills | ||
18 | */ | 13 | */ |
19 | #include <linux/module.h> | 14 | #include <linux/module.h> |
20 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
@@ -77,11 +72,6 @@ unsigned long profile_pc(struct pt_regs *regs) | |||
77 | EXPORT_SYMBOL(profile_pc); | 72 | EXPORT_SYMBOL(profile_pc); |
78 | #endif | 73 | #endif |
79 | 74 | ||
80 | /* | ||
81 | * hook for setting the RTC's idea of the current time. | ||
82 | */ | ||
83 | int (*set_rtc)(void); | ||
84 | |||
85 | #ifndef CONFIG_GENERIC_TIME | 75 | #ifndef CONFIG_GENERIC_TIME |
86 | static unsigned long dummy_gettimeoffset(void) | 76 | static unsigned long dummy_gettimeoffset(void) |
87 | { | 77 | { |
@@ -89,140 +79,6 @@ static unsigned long dummy_gettimeoffset(void) | |||
89 | } | 79 | } |
90 | #endif | 80 | #endif |
91 | 81 | ||
92 | static unsigned long next_rtc_update; | ||
93 | |||
94 | /* | ||
95 | * If we have an externally synchronized linux clock, then update | ||
96 | * CMOS clock accordingly every ~11 minutes. set_rtc() has to be | ||
97 | * called as close as possible to 500 ms before the new second | ||
98 | * starts. | ||
99 | */ | ||
100 | static inline void do_set_rtc(void) | ||
101 | { | ||
102 | if (!ntp_synced() || set_rtc == NULL) | ||
103 | return; | ||
104 | |||
105 | if (next_rtc_update && | ||
106 | time_before((unsigned long)xtime.tv_sec, next_rtc_update)) | ||
107 | return; | ||
108 | |||
109 | if (xtime.tv_nsec < 500000000 - ((unsigned) tick_nsec >> 1) && | ||
110 | xtime.tv_nsec >= 500000000 + ((unsigned) tick_nsec >> 1)) | ||
111 | return; | ||
112 | |||
113 | if (set_rtc()) | ||
114 | /* | ||
115 | * rtc update failed. Try again in 60s | ||
116 | */ | ||
117 | next_rtc_update = xtime.tv_sec + 60; | ||
118 | else | ||
119 | next_rtc_update = xtime.tv_sec + 660; | ||
120 | } | ||
121 | |||
122 | #ifdef CONFIG_LEDS | ||
123 | |||
124 | static void dummy_leds_event(led_event_t evt) | ||
125 | { | ||
126 | } | ||
127 | |||
128 | void (*leds_event)(led_event_t) = dummy_leds_event; | ||
129 | |||
130 | struct leds_evt_name { | ||
131 | const char name[8]; | ||
132 | int on; | ||
133 | int off; | ||
134 | }; | ||
135 | |||
136 | static const struct leds_evt_name evt_names[] = { | ||
137 | { "amber", led_amber_on, led_amber_off }, | ||
138 | { "blue", led_blue_on, led_blue_off }, | ||
139 | { "green", led_green_on, led_green_off }, | ||
140 | { "red", led_red_on, led_red_off }, | ||
141 | }; | ||
142 | |||
143 | static ssize_t leds_store(struct sys_device *dev, | ||
144 | struct sysdev_attribute *attr, | ||
145 | const char *buf, size_t size) | ||
146 | { | ||
147 | int ret = -EINVAL, len = strcspn(buf, " "); | ||
148 | |||
149 | if (len > 0 && buf[len] == '\0') | ||
150 | len--; | ||
151 | |||
152 | if (strncmp(buf, "claim", len) == 0) { | ||
153 | leds_event(led_claim); | ||
154 | ret = size; | ||
155 | } else if (strncmp(buf, "release", len) == 0) { | ||
156 | leds_event(led_release); | ||
157 | ret = size; | ||
158 | } else { | ||
159 | int i; | ||
160 | |||
161 | for (i = 0; i < ARRAY_SIZE(evt_names); i++) { | ||
162 | if (strlen(evt_names[i].name) != len || | ||
163 | strncmp(buf, evt_names[i].name, len) != 0) | ||
164 | continue; | ||
165 | if (strncmp(buf+len, " on", 3) == 0) { | ||
166 | leds_event(evt_names[i].on); | ||
167 | ret = size; | ||
168 | } else if (strncmp(buf+len, " off", 4) == 0) { | ||
169 | leds_event(evt_names[i].off); | ||
170 | ret = size; | ||
171 | } | ||
172 | break; | ||
173 | } | ||
174 | } | ||
175 | return ret; | ||
176 | } | ||
177 | |||
178 | static SYSDEV_ATTR(event, 0200, NULL, leds_store); | ||
179 | |||
180 | static int leds_suspend(struct sys_device *dev, pm_message_t state) | ||
181 | { | ||
182 | leds_event(led_stop); | ||
183 | return 0; | ||
184 | } | ||
185 | |||
186 | static int leds_resume(struct sys_device *dev) | ||
187 | { | ||
188 | leds_event(led_start); | ||
189 | return 0; | ||
190 | } | ||
191 | |||
192 | static int leds_shutdown(struct sys_device *dev) | ||
193 | { | ||
194 | leds_event(led_halted); | ||
195 | return 0; | ||
196 | } | ||
197 | |||
198 | static struct sysdev_class leds_sysclass = { | ||
199 | .name = "leds", | ||
200 | .shutdown = leds_shutdown, | ||
201 | .suspend = leds_suspend, | ||
202 | .resume = leds_resume, | ||
203 | }; | ||
204 | |||
205 | static struct sys_device leds_device = { | ||
206 | .id = 0, | ||
207 | .cls = &leds_sysclass, | ||
208 | }; | ||
209 | |||
210 | static int __init leds_init(void) | ||
211 | { | ||
212 | int ret; | ||
213 | ret = sysdev_class_register(&leds_sysclass); | ||
214 | if (ret == 0) | ||
215 | ret = sysdev_register(&leds_device); | ||
216 | if (ret == 0) | ||
217 | ret = sysdev_create_file(&leds_device, &attr_event); | ||
218 | return ret; | ||
219 | } | ||
220 | |||
221 | device_initcall(leds_init); | ||
222 | |||
223 | EXPORT_SYMBOL(leds_event); | ||
224 | #endif | ||
225 | |||
226 | #ifdef CONFIG_LEDS_TIMER | 82 | #ifdef CONFIG_LEDS_TIMER |
227 | static inline void do_leds(void) | 83 | static inline void do_leds(void) |
228 | { | 84 | { |
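One detail of the do_set_rtc() code removed above deserves a note: its 500 ms window test joins the two bounds with &&, a condition no tv_nsec value can satisfy for a positive tick_nsec, so the early return was dead code and the RTC write was attempted regardless of phase. The guard as presumably intended (a sketch of the intent, not code from the tree) joins the bounds with ||:

    /* Skip the RTC update unless xtime is within half a tick
     * of 500 ms past the second boundary. */
    if (xtime.tv_nsec < 500000000 - ((unsigned)tick_nsec >> 1) ||
        xtime.tv_nsec >= 500000000 + ((unsigned)tick_nsec >> 1))
            return;

With the generic timekeeping code handling periodic persistent-clock sync centrally, deleting the whole mechanism is the cleaner fix.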
@@ -295,39 +151,6 @@ int do_settimeofday(struct timespec *tv) | |||
295 | EXPORT_SYMBOL(do_settimeofday); | 151 | EXPORT_SYMBOL(do_settimeofday); |
296 | #endif /* !CONFIG_GENERIC_TIME */ | 152 | #endif /* !CONFIG_GENERIC_TIME */ |
297 | 153 | ||
298 | /** | ||
299 | * save_time_delta - Save the offset between system time and RTC time | ||
300 | * @delta: pointer to timespec to store delta | ||
301 | * @rtc: pointer to timespec for current RTC time | ||
302 | * | ||
303 | * Return a delta between the system time and the RTC time, such | ||
304 | * that system time can be restored later with restore_time_delta() | ||
305 | */ | ||
306 | void save_time_delta(struct timespec *delta, struct timespec *rtc) | ||
307 | { | ||
308 | set_normalized_timespec(delta, | ||
309 | xtime.tv_sec - rtc->tv_sec, | ||
310 | xtime.tv_nsec - rtc->tv_nsec); | ||
311 | } | ||
312 | EXPORT_SYMBOL(save_time_delta); | ||
313 | |||
314 | /** | ||
315 | * restore_time_delta - Restore the current system time | ||
316 | * @delta: delta returned by save_time_delta() | ||
317 | * @rtc: pointer to timespec for current RTC time | ||
318 | */ | ||
319 | void restore_time_delta(struct timespec *delta, struct timespec *rtc) | ||
320 | { | ||
321 | struct timespec ts; | ||
322 | |||
323 | set_normalized_timespec(&ts, | ||
324 | delta->tv_sec + rtc->tv_sec, | ||
325 | delta->tv_nsec + rtc->tv_nsec); | ||
326 | |||
327 | do_settimeofday(&ts); | ||
328 | } | ||
329 | EXPORT_SYMBOL(restore_time_delta); | ||
330 | |||
331 | #ifndef CONFIG_GENERIC_CLOCKEVENTS | 154 | #ifndef CONFIG_GENERIC_CLOCKEVENTS |
332 | /* | 155 | /* |
333 | * Kernel system timer support. | 156 | * Kernel system timer support. |
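The save_time_delta()/restore_time_delta() pair deleted above existed so machine suspend code could park the xtime-vs-RTC offset across a sleep transition; the generic suspend/resume timekeeping (read_persistent_clock() and friends) now covers this, which is presumably why the ARM copies are dropped. The removed round-trip, as a caller used it (read_rtc() stands in for a machine-specific RTC read and is hypothetical):

    struct timespec delta, rtc;

    read_rtc(&rtc);                   /* just before suspend */
    save_time_delta(&delta, &rtc);    /* delta = xtime - rtc  */

    /* ... machine sleeps; the RTC keeps counting ... */

    read_rtc(&rtc);                   /* first thing on resume */
    restore_time_delta(&delta, &rtc); /* settimeofday(rtc + delta) */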
@@ -336,7 +159,6 @@ void timer_tick(void) | |||
336 | { | 159 | { |
337 | profile_tick(CPU_PROFILING); | 160 | profile_tick(CPU_PROFILING); |
338 | do_leds(); | 161 | do_leds(); |
339 | do_set_rtc(); | ||
340 | write_seqlock(&xtime_lock); | 162 | write_seqlock(&xtime_lock); |
341 | do_timer(1); | 163 | do_timer(1); |
342 | write_sequnlock(&xtime_lock); | 164 | write_sequnlock(&xtime_lock); |
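Note that timer_tick() keeps do_leds() while losing do_set_rtc(), and the CONFIG_LEDS sysdev interface deleted from this file is relocated rather than dropped (to a dedicated arch/arm/kernel/leds.c in this series, as far as the merge history shows). The leds_event hook itself is unchanged; a machine port installs a handler roughly like this (my_leds_event and my_board_leds_init are illustrative names):

    #include <linux/init.h>
    #include <asm/leds.h>

    static void my_leds_event(led_event_t evt)
    {
            switch (evt) {
            case led_timer:         /* periodic tick: toggle heartbeat LED */
                    break;
            case led_idle_start:    /* CPU idle: dim the activity LED */
            case led_idle_end:
                    break;
            default:
                    break;
            }
    }

    static int __init my_board_leds_init(void)
    {
            leds_event = my_leds_event;     /* replaces the dummy handler */
            return 0;
    }
    arch_initcall(my_board_leds_init);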
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 3f361a783f43..1621e5327b2a 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -12,15 +12,17 @@ | |||
12 | * 'linux/arch/arm/lib/traps.S'. Mostly a debugging aid, but will probably | 12 | * 'linux/arch/arm/lib/traps.S'. Mostly a debugging aid, but will probably |
13 | * kill the offending process. | 13 | * kill the offending process. |
14 | */ | 14 | */ |
15 | #include <linux/module.h> | ||
16 | #include <linux/signal.h> | 15 | #include <linux/signal.h> |
17 | #include <linux/spinlock.h> | ||
18 | #include <linux/personality.h> | 16 | #include <linux/personality.h> |
19 | #include <linux/kallsyms.h> | 17 | #include <linux/kallsyms.h> |
20 | #include <linux/delay.h> | 18 | #include <linux/spinlock.h> |
19 | #include <linux/uaccess.h> | ||
21 | #include <linux/hardirq.h> | 20 | #include <linux/hardirq.h> |
21 | #include <linux/kdebug.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/kexec.h> | ||
24 | #include <linux/delay.h> | ||
22 | #include <linux/init.h> | 25 | #include <linux/init.h> |
23 | #include <linux/uaccess.h> | ||
24 | 26 | ||
25 | #include <asm/atomic.h> | 27 | #include <asm/atomic.h> |
26 | #include <asm/cacheflush.h> | 28 | #include <asm/cacheflush.h> |
@@ -224,14 +226,21 @@ void show_stack(struct task_struct *tsk, unsigned long *sp) | |||
224 | #define S_SMP "" | 226 | #define S_SMP "" |
225 | #endif | 227 | #endif |
226 | 228 | ||
227 | static void __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs) | 229 | static int __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs) |
228 | { | 230 | { |
229 | struct task_struct *tsk = thread->task; | 231 | struct task_struct *tsk = thread->task; |
230 | static int die_counter; | 232 | static int die_counter; |
233 | int ret; | ||
231 | 234 | ||
232 | printk(KERN_EMERG "Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n", | 235 | printk(KERN_EMERG "Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n", |
233 | str, err, ++die_counter); | 236 | str, err, ++die_counter); |
234 | sysfs_printk_last_file(); | 237 | sysfs_printk_last_file(); |
238 | |||
239 | /* trap and error numbers are mostly meaningless on ARM */ | ||
240 | ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV); | ||
241 | if (ret == NOTIFY_STOP) | ||
242 | return ret; | ||
243 | |||
235 | print_modules(); | 244 | print_modules(); |
236 | __show_regs(regs); | 245 | __show_regs(regs); |
237 | printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n", | 246 | printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n", |
@@ -243,6 +252,8 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p | |||
243 | dump_backtrace(regs, tsk); | 252 | dump_backtrace(regs, tsk); |
244 | dump_instr(KERN_EMERG, regs); | 253 | dump_instr(KERN_EMERG, regs); |
245 | } | 254 | } |
255 | |||
256 | return ret; | ||
246 | } | 257 | } |
247 | 258 | ||
248 | DEFINE_SPINLOCK(die_lock); | 259 | DEFINE_SPINLOCK(die_lock); |
@@ -250,16 +261,21 @@ DEFINE_SPINLOCK(die_lock); | |||
250 | /* | 261 | /* |
251 | * This function is protected against re-entrancy. | 262 | * This function is protected against re-entrancy. |
252 | */ | 263 | */ |
253 | NORET_TYPE void die(const char *str, struct pt_regs *regs, int err) | 264 | void die(const char *str, struct pt_regs *regs, int err) |
254 | { | 265 | { |
255 | struct thread_info *thread = current_thread_info(); | 266 | struct thread_info *thread = current_thread_info(); |
267 | int ret; | ||
256 | 268 | ||
257 | oops_enter(); | 269 | oops_enter(); |
258 | 270 | ||
259 | spin_lock_irq(&die_lock); | 271 | spin_lock_irq(&die_lock); |
260 | console_verbose(); | 272 | console_verbose(); |
261 | bust_spinlocks(1); | 273 | bust_spinlocks(1); |
262 | __die(str, err, thread, regs); | 274 | ret = __die(str, err, thread, regs); |
275 | |||
276 | if (regs && kexec_should_crash(thread->task)) | ||
277 | crash_kexec(regs); | ||
278 | |||
263 | bust_spinlocks(0); | 279 | bust_spinlocks(0); |
264 | add_taint(TAINT_DIE); | 280 | add_taint(TAINT_DIE); |
265 | spin_unlock_irq(&die_lock); | 281 | spin_unlock_irq(&die_lock); |
@@ -267,11 +283,10 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err) | |||
267 | 283 | ||
268 | if (in_interrupt()) | 284 | if (in_interrupt()) |
269 | panic("Fatal exception in interrupt"); | 285 | panic("Fatal exception in interrupt"); |
270 | |||
271 | if (panic_on_oops) | 286 | if (panic_on_oops) |
272 | panic("Fatal exception"); | 287 | panic("Fatal exception"); |
273 | 288 | if (ret != NOTIFY_STOP) | |
274 | do_exit(SIGSEGV); | 289 | do_exit(SIGSEGV); |
275 | } | 290 | } |
276 | 291 | ||
277 | void arm_notify_die(const char *str, struct pt_regs *regs, | 292 | void arm_notify_die(const char *str, struct pt_regs *regs, |
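The traps.c hunks thread the die-notifier protocol through the oops path: __die() now calls notify_die() before dumping state, a NOTIFY_STOP verdict suppresses both the register dump and the final do_exit(SIGSEGV), and kexec_should_crash()/crash_kexec() gives kdump a chance to capture the crash. A client such as a debugger hooks in through the standard chain; a sketch (the handler body is illustrative):

    #include <linux/kernel.h>
    #include <linux/kdebug.h>
    #include <linux/notifier.h>
    #include <linux/init.h>

    static int my_die_handler(struct notifier_block *nb,
                              unsigned long val, void *data)
    {
            struct die_args *args = data;

            if (val == DIE_OOPS)
                    pr_emerg("oops trapped: %s (err %lx)\n",
                             args->str, args->err);

            return NOTIFY_DONE;     /* NOTIFY_STOP would swallow the oops */
    }

    static struct notifier_block my_die_nb = {
            .notifier_call = my_die_handler,
    };

    static int __init my_die_init(void)
    {
            return register_die_notifier(&my_die_nb);
    }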
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index 786ac2b6914a..50292cd9c120 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -359,7 +359,9 @@ void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk) | |||
359 | frame.fp = regs->ARM_fp; | 359 | frame.fp = regs->ARM_fp; |
360 | frame.sp = regs->ARM_sp; | 360 | frame.sp = regs->ARM_sp; |
361 | frame.lr = regs->ARM_lr; | 361 | frame.lr = regs->ARM_lr; |
362 | frame.pc = regs->ARM_pc; | 362 | /* PC might be corrupted, use LR in that case. */ |
363 | frame.pc = kernel_text_address(regs->ARM_pc) | ||
364 | ? regs->ARM_pc : regs->ARM_lr; | ||
363 | } else if (tsk == current) { | 365 | } else if (tsk == current) { |
364 | frame.fp = (unsigned long)__builtin_frame_address(0); | 366 | frame.fp = (unsigned long)__builtin_frame_address(0); |
365 | frame.sp = current_sp; | 367 | frame.sp = current_sp; |
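The unwinder change covers crashes caused by a wild branch: if PC does not point into kernel or module text, backtracing from it is hopeless, but LR still holds the return address of the offending call, so starting there loses at most one frame. kernel_text_address() (declared in <linux/kernel.h>) is the standard predicate for that test:

    /* Prefer PC, but fall back to LR when PC is not in any
     * text section (e.g. after a call through a bad pointer). */
    frame.pc = kernel_text_address(regs->ARM_pc)
                    ? regs->ARM_pc : regs->ARM_lr;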
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index aecf87dfbaec..b16c07914b55 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -24,13 +24,11 @@ SECTIONS | |||
24 | #else | 24 | #else |
25 | . = PAGE_OFFSET + TEXT_OFFSET; | 25 | . = PAGE_OFFSET + TEXT_OFFSET; |
26 | #endif | 26 | #endif |
27 | .text.head : { | ||
28 | _stext = .; | ||
29 | _sinittext = .; | ||
30 | *(.text.head) | ||
31 | } | ||
32 | 27 | ||
33 | .init : { /* Init code and data */ | 28 | .init : { /* Init code and data */ |
29 | _stext = .; | ||
30 | _sinittext = .; | ||
31 | HEAD_TEXT | ||
34 | INIT_TEXT | 32 | INIT_TEXT |
35 | _einittext = .; | 33 | _einittext = .; |
36 | __proc_info_begin = .; | 34 | __proc_info_begin = .; |
@@ -42,48 +40,32 @@ SECTIONS | |||
42 | __tagtable_begin = .; | 40 | __tagtable_begin = .; |
43 | *(.taglist.init) | 41 | *(.taglist.init) |
44 | __tagtable_end = .; | 42 | __tagtable_end = .; |
45 | . = ALIGN(16); | 43 | |
46 | __setup_start = .; | 44 | INIT_SETUP(16) |
47 | *(.init.setup) | 45 | |
48 | __setup_end = .; | 46 | INIT_CALLS |
49 | __early_begin = .; | 47 | CON_INITCALL |
50 | *(.early_param.init) | 48 | SECURITY_INITCALL |
51 | __early_end = .; | 49 | INIT_RAM_FS |
52 | __initcall_start = .; | 50 | |
53 | INITCALLS | ||
54 | __initcall_end = .; | ||
55 | __con_initcall_start = .; | ||
56 | *(.con_initcall.init) | ||
57 | __con_initcall_end = .; | ||
58 | __security_initcall_start = .; | ||
59 | *(.security_initcall.init) | ||
60 | __security_initcall_end = .; | ||
61 | #ifdef CONFIG_BLK_DEV_INITRD | ||
62 | . = ALIGN(32); | ||
63 | __initramfs_start = .; | ||
64 | usr/built-in.o(.init.ramfs) | ||
65 | __initramfs_end = .; | ||
66 | #endif | ||
67 | . = ALIGN(PAGE_SIZE); | ||
68 | __per_cpu_load = .; | ||
69 | __per_cpu_start = .; | ||
70 | *(.data.percpu.page_aligned) | ||
71 | *(.data.percpu) | ||
72 | *(.data.percpu.shared_aligned) | ||
73 | __per_cpu_end = .; | ||
74 | #ifndef CONFIG_XIP_KERNEL | 51 | #ifndef CONFIG_XIP_KERNEL |
75 | __init_begin = _stext; | 52 | __init_begin = _stext; |
76 | INIT_DATA | 53 | INIT_DATA |
77 | . = ALIGN(PAGE_SIZE); | ||
78 | __init_end = .; | ||
79 | #endif | 54 | #endif |
80 | } | 55 | } |
81 | 56 | ||
82 | /DISCARD/ : { /* Exit code and data */ | 57 | PERCPU(PAGE_SIZE) |
83 | EXIT_TEXT | 58 | |
84 | EXIT_DATA | 59 | #ifndef CONFIG_XIP_KERNEL |
85 | *(.exitcall.exit) | 60 | . = ALIGN(PAGE_SIZE); |
86 | *(.discard) | 61 | __init_end = .; |
62 | #endif | ||
63 | |||
64 | /* | ||
65 | * unwind exit sections must be discarded before the rest of the | ||
66 | * unwind sections get included. | ||
67 | */ | ||
68 | /DISCARD/ : { | ||
87 | *(.ARM.exidx.exit.text) | 69 | *(.ARM.exidx.exit.text) |
88 | *(.ARM.extab.exit.text) | 70 | *(.ARM.extab.exit.text) |
89 | #ifndef CONFIG_HOTPLUG_CPU | 71 | #ifndef CONFIG_HOTPLUG_CPU |
@@ -157,7 +139,7 @@ SECTIONS | |||
157 | * first, the init task union, aligned | 139 | * first, the init task union, aligned |
158 | * to an 8192 byte boundary. | 140 | * to an 8192 byte boundary. |
159 | */ | 141 | */ |
160 | *(.data.init_task) | 142 | INIT_TASK_DATA(THREAD_SIZE) |
161 | 143 | ||
162 | #ifdef CONFIG_XIP_KERNEL | 144 | #ifdef CONFIG_XIP_KERNEL |
163 | . = ALIGN(PAGE_SIZE); | 145 | . = ALIGN(PAGE_SIZE); |
@@ -167,17 +149,8 @@ SECTIONS | |||
167 | __init_end = .; | 149 | __init_end = .; |
168 | #endif | 150 | #endif |
169 | 151 | ||
170 | . = ALIGN(PAGE_SIZE); | 152 | NOSAVE_DATA |
171 | __nosave_begin = .; | 153 | CACHELINE_ALIGNED_DATA(32) |
172 | *(.data.nosave) | ||
173 | . = ALIGN(PAGE_SIZE); | ||
174 | __nosave_end = .; | ||
175 | |||
176 | /* | ||
177 | * then the cacheline aligned data | ||
178 | */ | ||
179 | . = ALIGN(32); | ||
180 | *(.data.cacheline_aligned) | ||
181 | 154 | ||
182 | /* | 155 | /* |
183 | * The exception fixup table (might need resorting at runtime) | 156 | * The exception fixup table (might need resorting at runtime) |
@@ -256,21 +229,14 @@ SECTIONS | |||
256 | } | 229 | } |
257 | #endif | 230 | #endif |
258 | 231 | ||
259 | .bss : { | 232 | BSS_SECTION(0, 0, 0) |
260 | __bss_start = .; /* BSS */ | 233 | _end = .; |
261 | *(.bss) | 234 | |
262 | *(COMMON) | 235 | STABS_DEBUG |
263 | __bss_stop = .; | ||
264 | _end = .; | ||
265 | } | ||
266 | /* Stabs debugging sections. */ | ||
267 | .stab 0 : { *(.stab) } | ||
268 | .stabstr 0 : { *(.stabstr) } | ||
269 | .stab.excl 0 : { *(.stab.excl) } | ||
270 | .stab.exclstr 0 : { *(.stab.exclstr) } | ||
271 | .stab.index 0 : { *(.stab.index) } | ||
272 | .stab.indexstr 0 : { *(.stab.indexstr) } | ||
273 | .comment 0 : { *(.comment) } | 236 | .comment 0 : { *(.comment) } |
237 | |||
238 | /* Default discards */ | ||
239 | DISCARDS | ||
274 | } | 240 | } |
275 | 241 | ||
276 | /* | 242 | /* |
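The vmlinux.lds.S rewrite trades the hand-maintained section lists for the shared macros in include/asm-generic/vmlinux.lds.h (INIT_SETUP, INIT_CALLS, CON_INITCALL, SECURITY_INITCALL, INIT_RAM_FS, PERCPU, INIT_TASK_DATA, NOSAVE_DATA, CACHELINE_ALIGNED_DATA, BSS_SECTION, STABS_DEBUG, DISCARDS), so every architecture emits the same symbols and picks up layout fixes centrally. As one example, INIT_SETUP(16) expands to roughly what the deleted lines spelled out by hand (paraphrased from the generic header of this era; the real macro wraps the symbols in VMLINUX_SYMBOL()):

    #define INIT_SETUP(initsetup_align)         \
            . = ALIGN(initsetup_align);         \
            __setup_start = .;                  \
            *(.init.setup)                      \
            __setup_end = .;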
diff --git a/arch/arm/kernel/xscale-cp0.c b/arch/arm/kernel/xscale-cp0.c
index 17127db906fa..1796157e3dd5 100644
--- a/arch/arm/kernel/xscale-cp0.c
+++ b/arch/arm/kernel/xscale-cp0.c
@@ -70,7 +70,7 @@ static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t) | |||
70 | * initialised state information on the first fault. | 70 | * initialised state information on the first fault. |
71 | */ | 71 | */ |
72 | 72 | ||
73 | case THREAD_NOTIFY_RELEASE: | 73 | case THREAD_NOTIFY_EXIT: |
74 | iwmmxt_task_release(thread); | 74 | iwmmxt_task_release(thread); |
75 | break; | 75 | break; |
76 | 76 | ||
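Last, the xscale-cp0.c hunk tracks the thread-notifier rename: THREAD_NOTIFY_RELEASE fired when the dead task's resources were torn down, while THREAD_NOTIFY_EXIT runs in the context of the exiting thread itself, so a handler like iwmmxt_do() above may still touch per-thread coprocessor state (that is the hedged reading of this series). The registration shape, modeled on this file (the my_* names and the cleanup call are illustrative):

    #include <linux/init.h>
    #include <linux/notifier.h>
    #include <asm/thread_notify.h>

    static int my_cp_notifier(struct notifier_block *self,
                              unsigned long cmd, void *t)
    {
            struct thread_info *thread = t;

            switch (cmd) {
            case THREAD_NOTIFY_EXIT:
                    my_release_cp_state(thread);    /* hypothetical cleanup */
                    break;
            }

            return NOTIFY_DONE;
    }

    static struct notifier_block my_cp_nb = {
            .notifier_call = my_cp_notifier,
    };

    static int __init my_cp_init(void)
    {
            thread_register_notifier(&my_cp_nb);
            return 0;
    }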