Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile         |    1
-rw-r--r--  arch/arm/kernel/armksyms.c       |    2
-rw-r--r--  arch/arm/kernel/calls.S          |    3
-rw-r--r--  arch/arm/kernel/entry-armv.S     |   11
-rw-r--r--  arch/arm/kernel/entry-common.S   |   67
-rw-r--r--  arch/arm/kernel/etm.c            |   15
-rw-r--r--  arch/arm/kernel/ftrace.c         |  188
-rw-r--r--  arch/arm/kernel/head-common.S    |    2
-rw-r--r--  arch/arm/kernel/head.S           |   50
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c  |  849
-rw-r--r--  arch/arm/kernel/module.c         |   68
-rw-r--r--  arch/arm/kernel/perf_event.c     |   12
-rw-r--r--  arch/arm/kernel/process.c        |   24
-rw-r--r--  arch/arm/kernel/ptrace.c         |  239
-rw-r--r--  arch/arm/kernel/setup.c          |   46
-rw-r--r--  arch/arm/kernel/smp.c            |    3
-rw-r--r--  arch/arm/kernel/unwind.c         |    2
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S    |   15
18 files changed, 1483 insertions, 114 deletions
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 980b78e31328..5b9b268f4fbb 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_KGDB) += kgdb.o
 obj-$(CONFIG_ARM_UNWIND)	+= unwind.o
 obj-$(CONFIG_HAVE_TCM)		+= tcm.o
 obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
+obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= hw_breakpoint.o
 
 obj-$(CONFIG_CRUNCH)		+= crunch.o crunch-bits.o
 AFLAGS_crunch-bits.o		:= -Wa,-mcpu=ep9312
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 8214bfebfaca..e5e1e5387678 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -165,6 +165,8 @@ EXPORT_SYMBOL(_find_next_bit_be);
 #endif
 
 #ifdef CONFIG_FUNCTION_TRACER
+#ifdef CONFIG_OLD_MCOUNT
 EXPORT_SYMBOL(mcount);
+#endif
 EXPORT_SYMBOL(__gnu_mcount_nc);
 #endif
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index afeb71fa72cb..5c26eccef998 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -376,6 +376,9 @@
 		CALL(sys_perf_event_open)
 /* 365 */	CALL(sys_recvmmsg)
 		CALL(sys_accept4)
+		CALL(sys_fanotify_init)
+		CALL(sys_fanotify_mark)
+		CALL(sys_prlimit64)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
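Note: the three CALL() entries added above wire up new syscalls in the table; sys_prlimit64, for example, reads or writes another task's resource limits in a single call. A minimal user-space illustration (assuming glibc's prlimit() wrapper, available from glibc 2.13; not part of this patch):

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl;

	/* pid 0 means "the calling process"; NULL new limit = read-only */
	if (prlimit(0, RLIMIT_NOFILE, NULL, &rl) == 0)
		printf("soft=%llu hard=%llu\n",
		       (unsigned long long)rl.rlim_cur,
		       (unsigned long long)rl.rlim_max);
	return 0;
}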
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index bb8e93a76407..c09e3573c5de 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -46,7 +46,8 @@
  * this macro assumes that irqstat (r6) and base (r5) are
  * preserved from get_irqnr_and_base above
  */
-	test_for_ipi r0, r6, r5, lr
+	ALT_SMP(test_for_ipi r0, r6, r5, lr)
+	ALT_UP_B(9997f)
 	movne	r0, sp
 	adrne	lr, BSYM(1b)
 	bne	do_IPI
@@ -57,6 +58,7 @@
 	adrne	lr, BSYM(1b)
 	bne	do_local_timer
 #endif
+9997:
 #endif
 
 	.endm
@@ -965,11 +967,8 @@ kuser_cmpxchg_fixup:
 	beq	1b
 	rsbs	r0, r3, #0
 	/* beware -- each __kuser slot must be 8 instructions max */
-#ifdef CONFIG_SMP
-	b	__kuser_memory_barrier
-#else
-	usr_ret	lr
-#endif
+	ALT_SMP(b	__kuser_memory_barrier)
+	ALT_UP(usr_ret	lr)
 
 #endif
 
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index f05a35a59694..2d23ad985180 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -48,6 +48,8 @@ work_pending:
 	beq	no_work_pending
 	mov	r0, sp				@ 'regs'
 	mov	r2, why				@ 'syscall'
+	tst	r1, #_TIF_SIGPENDING		@ delivering a signal?
+	movne	why, #0				@ prevent further restarts
 	bl	do_notify_resume
 	b	ret_slow_syscall		@ Check work again
 
@@ -127,30 +129,58 @@ ENDPROC(ret_from_fork)
  * clobber the ip register. This is OK because the ARM calling convention
  * allows it to be clobbered in subroutines and doesn't use it to hold
  * parameters.)
+ *
+ * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
+ * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
+ * arch/arm/kernel/ftrace.c).
  */
+
+#ifndef CONFIG_OLD_MCOUNT
+#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
+#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
+#endif
+#endif
+
 #ifdef CONFIG_DYNAMIC_FTRACE
-ENTRY(mcount)
+ENTRY(__gnu_mcount_nc)
+	mov	ip, lr
+	ldmia	sp!, {lr}
+	mov	pc, ip
+ENDPROC(__gnu_mcount_nc)
+
+ENTRY(ftrace_caller)
 	stmdb	sp!, {r0-r3, lr}
 	mov	r0, lr
 	sub	r0, r0, #MCOUNT_INSN_SIZE
+	ldr	r1, [sp, #20]
 
-	.globl mcount_call
-mcount_call:
+	.global	ftrace_call
+ftrace_call:
 	bl	ftrace_stub
-	ldr	lr, [fp, #-4]			@ restore lr
-	ldmia	sp!, {r0-r3, pc}
+	ldmia	sp!, {r0-r3, ip, lr}
+	mov	pc, ip
+ENDPROC(ftrace_caller)
 
-ENTRY(ftrace_caller)
+#ifdef CONFIG_OLD_MCOUNT
+ENTRY(mcount)
+	stmdb	sp!, {lr}
+	ldr	lr, [fp, #-4]
+	ldmia	sp!, {pc}
+ENDPROC(mcount)
+
+ENTRY(ftrace_caller_old)
 	stmdb	sp!, {r0-r3, lr}
 	ldr	r1, [fp, #-4]
 	mov	r0, lr
 	sub	r0, r0, #MCOUNT_INSN_SIZE
 
-	.globl ftrace_call
-ftrace_call:
+	.globl	ftrace_call_old
+ftrace_call_old:
 	bl	ftrace_stub
 	ldr	lr, [fp, #-4]			@ restore lr
 	ldmia	sp!, {r0-r3, pc}
+ENDPROC(ftrace_caller_old)
+#endif
 
 #else
 
@@ -158,7 +188,7 @@ ENTRY(__gnu_mcount_nc)
 	stmdb	sp!, {r0-r3, lr}
 	ldr	r0, =ftrace_trace_function
 	ldr	r2, [r0]
-	adr	r0, ftrace_stub
+	adr	r0, .Lftrace_stub
 	cmp	r0, r2
 	bne	gnu_trace
 	ldmia	sp!, {r0-r3, ip, lr}
@@ -168,11 +198,19 @@ gnu_trace:
 	ldr	r1, [sp, #20]			@ lr of instrumented routine
 	mov	r0, lr
 	sub	r0, r0, #MCOUNT_INSN_SIZE
-	mov	lr, pc
+	adr	lr, BSYM(1f)
 	mov	pc, r2
+1:
 	ldmia	sp!, {r0-r3, ip, lr}
 	mov	pc, ip
+ENDPROC(__gnu_mcount_nc)
 
+#ifdef CONFIG_OLD_MCOUNT
+/*
+ * This is under an ifdef in order to force link-time errors for people trying
+ * to build with !FRAME_POINTER with a GCC which doesn't use the new-style
+ * mcount.
+ */
 ENTRY(mcount)
 	stmdb	sp!, {r0-r3, lr}
 	ldr	r0, =ftrace_trace_function
@@ -191,12 +229,15 @@ trace:
 	mov	pc, r2
 	ldr	lr, [fp, #-4]			@ restore lr
 	ldmia	sp!, {r0-r3, pc}
+ENDPROC(mcount)
+#endif
 
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
-	.globl ftrace_stub
-ftrace_stub:
+ENTRY(ftrace_stub)
+.Lftrace_stub:
 	mov	pc, lr
+ENDPROC(ftrace_stub)
 
 #endif /* CONFIG_FUNCTION_TRACER */
 
@@ -418,11 +459,13 @@ ENDPROC(sys_clone_wrapper)
 
 sys_sigreturn_wrapper:
 	add	r0, sp, #S_OFF
+	mov	why, #0		@ prevent syscall restart handling
 	b	sys_sigreturn
 ENDPROC(sys_sigreturn_wrapper)
 
 sys_rt_sigreturn_wrapper:
 	add	r0, sp, #S_OFF
+	mov	why, #0		@ prevent syscall restart handling
 	b	sys_rt_sigreturn
 ENDPROC(sys_rt_sigreturn_wrapper)
 
diff --git a/arch/arm/kernel/etm.c b/arch/arm/kernel/etm.c
index 33c7077174db..a48d51257988 100644
--- a/arch/arm/kernel/etm.c
+++ b/arch/arm/kernel/etm.c
@@ -30,6 +30,21 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Alexander Shishkin");
 
+/*
+ * ETM tracer state
+ */
+struct tracectx {
+	unsigned int	etb_bufsz;
+	void __iomem	*etb_regs;
+	void __iomem	*etm_regs;
+	unsigned long	flags;
+	int		ncmppairs;
+	int		etm_portsz;
+	struct device	*dev;
+	struct clk	*emu_clk;
+	struct mutex	mutex;
+};
+
 static struct tracectx tracer;
 
 static inline bool trace_isrunning(struct tracectx *t)
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index 0298286ad4ad..971ac8c36ea7 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -2,102 +2,194 @@
  * Dynamic function tracing support.
  *
  * Copyright (C) 2008 Abhishek Sagar <sagar.abhishek@gmail.com>
+ * Copyright (C) 2010 Rabin Vincent <rabin@rab.in>
  *
  * For licencing details, see COPYING.
  *
  * Defines low-level handling of mcount calls when the kernel
  * is compiled with the -pg flag. When using dynamic ftrace, the
- * mcount call-sites get patched lazily with NOP till they are
- * enabled. All code mutation routines here take effect atomically.
+ * mcount call-sites get patched with NOP till they are enabled.
+ * All code mutation routines here are called under stop_machine().
  */
 
 #include <linux/ftrace.h>
+#include <linux/uaccess.h>
 
 #include <asm/cacheflush.h>
 #include <asm/ftrace.h>
 
-#define PC_OFFSET      8
-#define BL_OPCODE      0xeb000000
-#define BL_OFFSET_MASK 0x00ffffff
+#ifdef CONFIG_THUMB2_KERNEL
+#define	NOP		0xeb04f85d	/* pop.w {lr} */
+#else
+#define	NOP		0xe8bd4000	/* pop {lr} */
+#endif
 
-static unsigned long bl_insn;
-static const unsigned long NOP = 0xe1a00000; /* mov r0, r0 */
+#ifdef CONFIG_OLD_MCOUNT
+#define OLD_MCOUNT_ADDR	((unsigned long) mcount)
+#define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old)
 
-unsigned char *ftrace_nop_replace(void)
+#define	OLD_NOP		0xe1a00000	/* mov r0, r0 */
+
+static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
 {
-	return (char *)&NOP;
+	return rec->arch.old_mcount ? OLD_NOP : NOP;
 }
 
+static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
+{
+	if (!rec->arch.old_mcount)
+		return addr;
+
+	if (addr == MCOUNT_ADDR)
+		addr = OLD_MCOUNT_ADDR;
+	else if (addr == FTRACE_ADDR)
+		addr = OLD_FTRACE_ADDR;
+
+	return addr;
+}
+#else
+static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
+{
+	return NOP;
+}
+
+static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
+{
+	return addr;
+}
+#endif
+
 /* construct a branch (BL) instruction to addr */
-unsigned char *ftrace_call_replace(unsigned long pc, unsigned long addr)
+#ifdef CONFIG_THUMB2_KERNEL
+static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
 {
+	unsigned long s, j1, j2, i1, i2, imm10, imm11;
+	unsigned long first, second;
 	long offset;
 
-	offset = (long)addr - (long)(pc + PC_OFFSET);
+	offset = (long)addr - (long)(pc + 4);
+	if (offset < -16777216 || offset > 16777214) {
+		WARN_ON_ONCE(1);
+		return 0;
+	}
+
+	s	= (offset >> 24) & 0x1;
+	i1	= (offset >> 23) & 0x1;
+	i2	= (offset >> 22) & 0x1;
+	imm10	= (offset >> 12) & 0x3ff;
+	imm11	= (offset >>  1) & 0x7ff;
+
+	j1 = (!i1) ^ s;
+	j2 = (!i2) ^ s;
+
+	first = 0xf000 | (s << 10) | imm10;
+	second = 0xd000 | (j1 << 13) | (j2 << 11) | imm11;
+
+	return (second << 16) | first;
+}
+#else
+static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
+{
+	long offset;
+
+	offset = (long)addr - (long)(pc + 8);
 	if (unlikely(offset < -33554432 || offset > 33554428)) {
 		/* Can't generate branches that far (from ARM ARM). Ftrace
 		 * doesn't generate branches outside of kernel text.
 		 */
 		WARN_ON_ONCE(1);
-		return NULL;
+		return 0;
 	}
-	offset = (offset >> 2) & BL_OFFSET_MASK;
-	bl_insn = BL_OPCODE | offset;
-	return (unsigned char *)&bl_insn;
-}
 
-int ftrace_modify_code(unsigned long pc, unsigned char *old_code,
-		       unsigned char *new_code)
-{
-	unsigned long err = 0, replaced = 0, old, new;
+	offset = (offset >> 2) & 0x00ffffff;
 
-	old = *(unsigned long *)old_code;
-	new = *(unsigned long *)new_code;
+	return 0xeb000000 | offset;
+}
+#endif
 
-	__asm__ __volatile__ (
-		"1:  ldr    %1, [%2]  \n"
-		"    cmp    %1, %4    \n"
-		"2:  streq  %3, [%2]  \n"
-		"    cmpne  %1, %3    \n"
-		"    movne  %0, #2    \n"
-		"3:\n"
+static int ftrace_modify_code(unsigned long pc, unsigned long old,
+			      unsigned long new)
+{
+	unsigned long replaced;
 
-		".pushsection .fixup, \"ax\"\n"
-		"4:  mov  %0, #1  \n"
-		"    b    3b      \n"
-		".popsection\n"
+	if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE))
+		return -EFAULT;
 
-		".pushsection __ex_table, \"a\"\n"
-		"    .long 1b, 4b \n"
-		"    .long 2b, 4b \n"
-		".popsection\n"
+	if (replaced != old)
+		return -EINVAL;
 
-		: "=r"(err), "=r"(replaced)
-		: "r"(pc), "r"(new), "r"(old), "0"(err), "1"(replaced)
-		: "memory");
+	if (probe_kernel_write((void *)pc, &new, MCOUNT_INSN_SIZE))
+		return -EPERM;
 
-	if (!err && (replaced == old))
-		flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);
+	flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);
 
-	return err;
+	return 0;
 }
 
 int ftrace_update_ftrace_func(ftrace_func_t func)
 {
-	int ret;
 	unsigned long pc, old;
-	unsigned char *new;
+	unsigned long new;
+	int ret;
 
 	pc = (unsigned long)&ftrace_call;
 	memcpy(&old, &ftrace_call, MCOUNT_INSN_SIZE);
 	new = ftrace_call_replace(pc, (unsigned long)func);
-	ret = ftrace_modify_code(pc, (unsigned char *)&old, new);
+
+	ret = ftrace_modify_code(pc, old, new);
+
+#ifdef CONFIG_OLD_MCOUNT
+	if (!ret) {
+		pc = (unsigned long)&ftrace_call_old;
+		memcpy(&old, &ftrace_call_old, MCOUNT_INSN_SIZE);
+		new = ftrace_call_replace(pc, (unsigned long)func);
+
+		ret = ftrace_modify_code(pc, old, new);
+	}
+#endif
+
+	return ret;
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+	unsigned long new, old;
+	unsigned long ip = rec->ip;
+
+	old = ftrace_nop_replace(rec);
+	new = ftrace_call_replace(ip, adjust_address(rec, addr));
+
+	return ftrace_modify_code(rec->ip, old, new);
+}
+
+int ftrace_make_nop(struct module *mod,
+		    struct dyn_ftrace *rec, unsigned long addr)
+{
+	unsigned long ip = rec->ip;
+	unsigned long old;
+	unsigned long new;
+	int ret;
+
+	old = ftrace_call_replace(ip, adjust_address(rec, addr));
+	new = ftrace_nop_replace(rec);
+	ret = ftrace_modify_code(ip, old, new);
+
+#ifdef CONFIG_OLD_MCOUNT
+	if (ret == -EINVAL && addr == MCOUNT_ADDR) {
+		rec->arch.old_mcount = true;
+
+		old = ftrace_call_replace(ip, adjust_address(rec, addr));
+		new = ftrace_nop_replace(rec);
+		ret = ftrace_modify_code(ip, old, new);
+	}
+#endif
+
 	return ret;
 }
 
-/* run from ftrace_init with irqs disabled */
 int __init ftrace_dyn_arch_init(void *data)
 {
-	ftrace_mcount_set(data);
+	*(unsigned long *)data = 0;
+
 	return 0;
 }
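For the ARM-mode path, ftrace_call_replace() packs a signed word offset (relative to pc + 8) into the low 24 bits of a BL opcode. A stand-alone sketch of the same encoding, useful as a sanity check (illustrative only, not part of the commit):

#include <assert.h>
#include <stdint.h>

/* Mirrors the ARM-mode ftrace_call_replace() above. */
static uint32_t arm_bl_encode(uint32_t pc, uint32_t addr)
{
	long offset = (long)addr - (long)(pc + 8);

	if (offset < -33554432 || offset > 33554428)	/* +/-32MB range */
		return 0;
	return 0xeb000000 | ((offset >> 2) & 0x00ffffff);
}

int main(void)
{
	/* "bl ." (branch to self) is the canonical 0xebfffffe */
	assert(arm_bl_encode(0x8000, 0x8000) == 0xebfffffe);
	return 0;
}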
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index b9505aa267c0..58a3e632b6d5 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -20,7 +20,7 @@
 __switch_data:
 	.long	__mmap_switched
 	.long	__data_loc			@ r4
-	.long	_data				@ r5
+	.long	_sdata				@ r5
 	.long	__bss_start			@ r6
 	.long	_end				@ r7
 	.long	processor_id			@ r4
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index eb62bf947212..b44d21e1e344 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -86,6 +86,9 @@ ENTRY(stext)
 	movs	r8, r5				@ invalid machine (r5=0)?
 	beq	__error_a			@ yes, error 'a'
 	bl	__vet_atags
+#ifdef CONFIG_SMP_ON_UP
+	bl	__fixup_smp
+#endif
 	bl	__create_page_tables
 
 	/*
@@ -333,4 +336,51 @@ __create_page_tables:
 ENDPROC(__create_page_tables)
 	.ltorg
 
+#ifdef CONFIG_SMP_ON_UP
+__fixup_smp:
+	mov	r7, #0x00070000
+	orr	r6, r7, #0xff000000	@ mask 0xff070000
+	orr	r7, r7, #0x41000000	@ val 0x41070000
+	and	r0, r9, r6
+	teq	r0, r7			@ ARM CPU and ARMv6/v7?
+	bne	__fixup_smp_on_up	@ no, assume UP
+
+	orr	r6, r6, #0x0000ff00
+	orr	r6, r6, #0x000000f0	@ mask 0xff07fff0
+	orr	r7, r7, #0x0000b000
+	orr	r7, r7, #0x00000020	@ val 0x4107b020
+	and	r0, r9, r6
+	teq	r0, r7			@ ARM 11MPCore?
+	moveq	pc, lr			@ yes, assume SMP
+
+	mrc	p15, 0, r0, c0, c0, 5	@ read MPIDR
+	tst	r0, #1 << 31
+	movne	pc, lr			@ bit 31 => SMP
+
+__fixup_smp_on_up:
+	adr	r0, 1f
+	ldmia	r0, {r3, r6, r7}
+	sub	r3, r0, r3
+	add	r6, r6, r3
+	add	r7, r7, r3
+2:	cmp	r6, r7
+	ldmia	r6!, {r0, r4}
+	strlo	r4, [r0, r3]
+	blo	2b
+	mov	pc, lr
+ENDPROC(__fixup_smp)
+
+1:	.word	.
+	.word	__smpalt_begin
+	.word	__smpalt_end
+
+	.pushsection .data
+	.globl	smp_on_up
+smp_on_up:
+	ALT_SMP(.long 1)
+	ALT_UP(.long 0)
+	.popsection
+
+#endif
+
 #include "head-common.S"
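The __fixup_smp_on_up loop walks a table of (instruction address, UP replacement) pairs emitted by the ALT_SMP()/ALT_UP() macros, patching each SMP-only instruction when the kernel finds itself on a uniprocessor; r3 carries the delta between the linked and running addresses, since the MMU is still off at this point. In C terms the loop is roughly this (an illustrative sketch; the entry struct name is invented, the real table is raw words emitted by the macros):

#include <linux/types.h>

struct smpalt_entry {
	u32 *insn;	/* address of the SMP-only instruction */
	u32 up_insn;	/* replacement to run on a UP system */
};

/* mirrors the "2: cmp/ldmia/strlo/blo" loop above */
static void fixup_smp_on_up(struct smpalt_entry *begin,
			    struct smpalt_entry *end, long reloc)
{
	struct smpalt_entry *e;

	for (e = begin; e < end; e++)
		*(u32 *)((char *)e->insn + reloc) = e->up_insn;
}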
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
new file mode 100644
index 000000000000..54593b0c241b
--- /dev/null
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -0,0 +1,849 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) 2009, 2010 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+/*
+ * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
+ * using the CPU's debug registers.
+ */
+#define pr_fmt(fmt) "hw-breakpoint: " fmt
+
+#include <linux/errno.h>
+#include <linux/perf_event.h>
+#include <linux/hw_breakpoint.h>
+#include <linux/smp.h>
+
+#include <asm/cacheflush.h>
+#include <asm/cputype.h>
+#include <asm/current.h>
+#include <asm/hw_breakpoint.h>
+#include <asm/kdebug.h>
+#include <asm/system.h>
+#include <asm/traps.h>
+
+/* Breakpoint currently in use for each BRP. */
+static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);
+
+/* Watchpoint currently in use for each WRP. */
+static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);
+
+/* Number of BRP/WRP registers on this CPU. */
+static int core_num_brps;
+static int core_num_wrps;
+
+/* Debug architecture version. */
+static u8 debug_arch;
+
+/* Maximum supported watchpoint length. */
+static u8 max_watchpoint_len;
+
+/* Determine number of BRP registers available. */
+static int get_num_brps(void)
+{
+	u32 didr;
+	ARM_DBG_READ(c0, 0, didr);
+	return ((didr >> 24) & 0xf) + 1;
+}
+
+/* Determine number of WRP registers available. */
+static int get_num_wrps(void)
+{
+	/*
+	 * FIXME: When a watchpoint fires, the only way to work out which
+	 * watchpoint it was is by disassembling the faulting instruction
+	 * and working out the address of the memory access.
+	 *
+	 * Furthermore, we can only do this if the watchpoint was precise
+	 * since imprecise watchpoints prevent us from calculating register
+	 * based addresses.
+	 *
+	 * For the time being, we only report 1 watchpoint register so we
+	 * always know which watchpoint fired. In the future we can either
+	 * add a disassembler and address generation emulator, or we can
+	 * insert a check to see if the DFAR is set on watchpoint exception
+	 * entry [the ARM ARM states that the DFAR is UNKNOWN, but
+	 * experience shows that it is set on some implementations].
+	 */
+
+#if 0
+	u32 didr, wrps;
+	ARM_DBG_READ(c0, 0, didr);
+	return ((didr >> 28) & 0xf) + 1;
+#endif
+
+	return 1;
+}
+
+int hw_breakpoint_slots(int type)
+{
+	/*
+	 * We can be called early, so don't rely on
+	 * our static variables being initialised.
+	 */
+	switch (type) {
+	case TYPE_INST:
+		return get_num_brps();
+	case TYPE_DATA:
+		return get_num_wrps();
+	default:
+		pr_warning("unknown slot type: %d\n", type);
+		return 0;
+	}
+}
+
+/* Determine debug architecture. */
+static u8 get_debug_arch(void)
+{
+	u32 didr;
+
+	/* Do we implement the extended CPUID interface? */
+	if (((read_cpuid_id() >> 16) & 0xf) != 0xf) {
+		pr_warning("CPUID feature registers not supported. "
+			   "Assuming v6 debug is present.\n");
+		return ARM_DEBUG_ARCH_V6;
+	}
+
+	ARM_DBG_READ(c0, 0, didr);
+	return (didr >> 16) & 0xf;
+}
+
+/* Does this core support mismatch breakpoints? */
+static int core_has_mismatch_bps(void)
+{
+	return debug_arch >= ARM_DEBUG_ARCH_V7_ECP14 && core_num_brps > 1;
+}
+
+u8 arch_get_debug_arch(void)
+{
+	return debug_arch;
+}
+
+#define READ_WB_REG_CASE(OP2, M, VAL)		\
+	case ((OP2 << 4) + M):			\
+		ARM_DBG_READ(c ## M, OP2, VAL); \
+		break
+
+#define WRITE_WB_REG_CASE(OP2, M, VAL)		\
+	case ((OP2 << 4) + M):			\
+		ARM_DBG_WRITE(c ## M, OP2, VAL);\
+		break
+
+#define GEN_READ_WB_REG_CASES(OP2, VAL)		\
+	READ_WB_REG_CASE(OP2, 0, VAL);		\
+	READ_WB_REG_CASE(OP2, 1, VAL);		\
+	READ_WB_REG_CASE(OP2, 2, VAL);		\
+	READ_WB_REG_CASE(OP2, 3, VAL);		\
+	READ_WB_REG_CASE(OP2, 4, VAL);		\
+	READ_WB_REG_CASE(OP2, 5, VAL);		\
+	READ_WB_REG_CASE(OP2, 6, VAL);		\
+	READ_WB_REG_CASE(OP2, 7, VAL);		\
+	READ_WB_REG_CASE(OP2, 8, VAL);		\
+	READ_WB_REG_CASE(OP2, 9, VAL);		\
+	READ_WB_REG_CASE(OP2, 10, VAL);		\
+	READ_WB_REG_CASE(OP2, 11, VAL);		\
+	READ_WB_REG_CASE(OP2, 12, VAL);		\
+	READ_WB_REG_CASE(OP2, 13, VAL);		\
+	READ_WB_REG_CASE(OP2, 14, VAL);		\
+	READ_WB_REG_CASE(OP2, 15, VAL)
+
+#define GEN_WRITE_WB_REG_CASES(OP2, VAL)	\
+	WRITE_WB_REG_CASE(OP2, 0, VAL);		\
+	WRITE_WB_REG_CASE(OP2, 1, VAL);		\
+	WRITE_WB_REG_CASE(OP2, 2, VAL);		\
+	WRITE_WB_REG_CASE(OP2, 3, VAL);		\
+	WRITE_WB_REG_CASE(OP2, 4, VAL);		\
+	WRITE_WB_REG_CASE(OP2, 5, VAL);		\
+	WRITE_WB_REG_CASE(OP2, 6, VAL);		\
+	WRITE_WB_REG_CASE(OP2, 7, VAL);		\
+	WRITE_WB_REG_CASE(OP2, 8, VAL);		\
+	WRITE_WB_REG_CASE(OP2, 9, VAL);		\
+	WRITE_WB_REG_CASE(OP2, 10, VAL);	\
+	WRITE_WB_REG_CASE(OP2, 11, VAL);	\
+	WRITE_WB_REG_CASE(OP2, 12, VAL);	\
+	WRITE_WB_REG_CASE(OP2, 13, VAL);	\
+	WRITE_WB_REG_CASE(OP2, 14, VAL);	\
+	WRITE_WB_REG_CASE(OP2, 15, VAL)
+
+static u32 read_wb_reg(int n)
+{
+	u32 val = 0;
+
+	switch (n) {
+	GEN_READ_WB_REG_CASES(ARM_OP2_BVR, val);
+	GEN_READ_WB_REG_CASES(ARM_OP2_BCR, val);
+	GEN_READ_WB_REG_CASES(ARM_OP2_WVR, val);
+	GEN_READ_WB_REG_CASES(ARM_OP2_WCR, val);
+	default:
+		pr_warning("attempt to read from unknown breakpoint "
+			   "register %d\n", n);
+	}
+
+	return val;
+}
+
+static void write_wb_reg(int n, u32 val)
+{
+	switch (n) {
+	GEN_WRITE_WB_REG_CASES(ARM_OP2_BVR, val);
+	GEN_WRITE_WB_REG_CASES(ARM_OP2_BCR, val);
+	GEN_WRITE_WB_REG_CASES(ARM_OP2_WVR, val);
+	GEN_WRITE_WB_REG_CASES(ARM_OP2_WCR, val);
+	default:
+		pr_warning("attempt to write to unknown breakpoint "
+			   "register %d\n", n);
+	}
+	isb();
+}
+
+/*
+ * In order to access the breakpoint/watchpoint control registers,
+ * we must be running in debug monitor mode. Unfortunately, we can
+ * be put into halting debug mode at any time by an external debugger
+ * but there is nothing we can do to prevent that.
+ */
+static int enable_monitor_mode(void)
+{
+	u32 dscr;
+	int ret = 0;
+
+	ARM_DBG_READ(c1, 0, dscr);
+
+	/* Ensure that halting mode is disabled. */
+	if (WARN_ONCE(dscr & ARM_DSCR_HDBGEN, "halting debug mode enabled."
+			"Unable to access hardware resources.")) {
+		ret = -EPERM;
+		goto out;
+	}
+
+	/* Write to the corresponding DSCR. */
+	switch (debug_arch) {
+	case ARM_DEBUG_ARCH_V6:
+	case ARM_DEBUG_ARCH_V6_1:
+		ARM_DBG_WRITE(c1, 0, (dscr | ARM_DSCR_MDBGEN));
+		break;
+	case ARM_DEBUG_ARCH_V7_ECP14:
+		ARM_DBG_WRITE(c2, 2, (dscr | ARM_DSCR_MDBGEN));
+		break;
+	default:
+		ret = -ENODEV;
+		goto out;
+	}
+
+	/* Check that the write made it through. */
+	ARM_DBG_READ(c1, 0, dscr);
+	if (WARN_ONCE(!(dscr & ARM_DSCR_MDBGEN),
+			"failed to enable monitor mode.")) {
+		ret = -EPERM;
+	}
+
+out:
+	return ret;
+}
+
+/*
+ * Check if 8-bit byte-address select is available.
+ * This clobbers WRP 0.
+ */
+static u8 get_max_wp_len(void)
+{
+	u32 ctrl_reg;
+	struct arch_hw_breakpoint_ctrl ctrl;
+	u8 size = 4;
+
+	if (debug_arch < ARM_DEBUG_ARCH_V7_ECP14)
+		goto out;
+
+	if (enable_monitor_mode())
+		goto out;
+
+	memset(&ctrl, 0, sizeof(ctrl));
+	ctrl.len = ARM_BREAKPOINT_LEN_8;
+	ctrl_reg = encode_ctrl_reg(ctrl);
+
+	write_wb_reg(ARM_BASE_WVR, 0);
+	write_wb_reg(ARM_BASE_WCR, ctrl_reg);
+	if ((read_wb_reg(ARM_BASE_WCR) & ctrl_reg) == ctrl_reg)
+		size = 8;
+
+out:
+	return size;
+}
+
+u8 arch_get_max_wp_len(void)
+{
+	return max_watchpoint_len;
+}
+
+/*
+ * Handler for reactivating a suspended watchpoint when the single
+ * step `mismatch' breakpoint is triggered.
+ */
+static void wp_single_step_handler(struct perf_event *bp, int unused,
+				   struct perf_sample_data *data,
+				   struct pt_regs *regs)
+{
+	perf_event_enable(counter_arch_bp(bp)->suspended_wp);
+	unregister_hw_breakpoint(bp);
+}
+
+static int bp_is_single_step(struct perf_event *bp)
+{
+	return bp->overflow_handler == wp_single_step_handler;
+}
+
+/*
+ * Install a perf counter breakpoint.
+ */
+int arch_install_hw_breakpoint(struct perf_event *bp)
+{
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+	struct perf_event **slot, **slots;
+	int i, max_slots, ctrl_base, val_base, ret = 0;
+
+	/* Ensure that we are in monitor mode and halting mode is disabled. */
+	ret = enable_monitor_mode();
+	if (ret)
+		goto out;
+
+	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
+		/* Breakpoint */
+		ctrl_base = ARM_BASE_BCR;
+		val_base = ARM_BASE_BVR;
+		slots = __get_cpu_var(bp_on_reg);
+		max_slots = core_num_brps - 1;
+
+		if (bp_is_single_step(bp)) {
+			info->ctrl.mismatch = 1;
+			i = max_slots;
+			slots[i] = bp;
+			goto setup;
+		}
+	} else {
+		/* Watchpoint */
+		ctrl_base = ARM_BASE_WCR;
+		val_base = ARM_BASE_WVR;
+		slots = __get_cpu_var(wp_on_reg);
+		max_slots = core_num_wrps;
+	}
+
+	for (i = 0; i < max_slots; ++i) {
+		slot = &slots[i];
+
+		if (!*slot) {
+			*slot = bp;
+			break;
+		}
+	}
+
+	if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot")) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+setup:
+	/* Setup the address register. */
+	write_wb_reg(val_base + i, info->address);
+
+	/* Setup the control register. */
+	write_wb_reg(ctrl_base + i, encode_ctrl_reg(info->ctrl) | 0x1);
+
+out:
+	return ret;
+}
+
+void arch_uninstall_hw_breakpoint(struct perf_event *bp)
+{
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+	struct perf_event **slot, **slots;
+	int i, max_slots, base;
+
+	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
+		/* Breakpoint */
+		base = ARM_BASE_BCR;
+		slots = __get_cpu_var(bp_on_reg);
+		max_slots = core_num_brps - 1;
+
+		if (bp_is_single_step(bp)) {
+			i = max_slots;
+			slots[i] = NULL;
+			goto reset;
+		}
+	} else {
+		/* Watchpoint */
+		base = ARM_BASE_WCR;
+		slots = __get_cpu_var(wp_on_reg);
+		max_slots = core_num_wrps;
+	}
+
+	/* Remove the breakpoint. */
+	for (i = 0; i < max_slots; ++i) {
+		slot = &slots[i];
+
+		if (*slot == bp) {
+			*slot = NULL;
+			break;
+		}
+	}
+
+	if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot"))
+		return;
+
+reset:
+	/* Reset the control register. */
+	write_wb_reg(base + i, 0);
+}
+
+static int get_hbp_len(u8 hbp_len)
+{
+	unsigned int len_in_bytes = 0;
+
+	switch (hbp_len) {
+	case ARM_BREAKPOINT_LEN_1:
+		len_in_bytes = 1;
+		break;
+	case ARM_BREAKPOINT_LEN_2:
+		len_in_bytes = 2;
+		break;
+	case ARM_BREAKPOINT_LEN_4:
+		len_in_bytes = 4;
+		break;
+	case ARM_BREAKPOINT_LEN_8:
+		len_in_bytes = 8;
+		break;
+	}
+
+	return len_in_bytes;
+}
+
+/*
+ * Check whether bp virtual address is in kernel space.
+ */
+int arch_check_bp_in_kernelspace(struct perf_event *bp)
+{
+	unsigned int len;
+	unsigned long va;
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+
+	va = info->address;
+	len = get_hbp_len(info->ctrl.len);
+
+	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
+}
+
+/*
+ * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl.
+ * Hopefully this will disappear when ptrace can bypass the conversion
+ * to generic breakpoint descriptions.
+ */
+int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
+			   int *gen_len, int *gen_type)
+{
+	/* Type */
+	switch (ctrl.type) {
+	case ARM_BREAKPOINT_EXECUTE:
+		*gen_type = HW_BREAKPOINT_X;
+		break;
+	case ARM_BREAKPOINT_LOAD:
+		*gen_type = HW_BREAKPOINT_R;
+		break;
+	case ARM_BREAKPOINT_STORE:
+		*gen_type = HW_BREAKPOINT_W;
+		break;
+	case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE:
+		*gen_type = HW_BREAKPOINT_RW;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Len */
+	switch (ctrl.len) {
+	case ARM_BREAKPOINT_LEN_1:
+		*gen_len = HW_BREAKPOINT_LEN_1;
+		break;
+	case ARM_BREAKPOINT_LEN_2:
+		*gen_len = HW_BREAKPOINT_LEN_2;
+		break;
+	case ARM_BREAKPOINT_LEN_4:
+		*gen_len = HW_BREAKPOINT_LEN_4;
+		break;
+	case ARM_BREAKPOINT_LEN_8:
+		*gen_len = HW_BREAKPOINT_LEN_8;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Construct an arch_hw_breakpoint from a perf_event.
+ */
+static int arch_build_bp_info(struct perf_event *bp)
+{
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+
+	/* Type */
+	switch (bp->attr.bp_type) {
+	case HW_BREAKPOINT_X:
+		info->ctrl.type = ARM_BREAKPOINT_EXECUTE;
+		break;
+	case HW_BREAKPOINT_R:
+		info->ctrl.type = ARM_BREAKPOINT_LOAD;
+		break;
+	case HW_BREAKPOINT_W:
+		info->ctrl.type = ARM_BREAKPOINT_STORE;
+		break;
+	case HW_BREAKPOINT_RW:
+		info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Len */
+	switch (bp->attr.bp_len) {
+	case HW_BREAKPOINT_LEN_1:
+		info->ctrl.len = ARM_BREAKPOINT_LEN_1;
+		break;
+	case HW_BREAKPOINT_LEN_2:
+		info->ctrl.len = ARM_BREAKPOINT_LEN_2;
+		break;
+	case HW_BREAKPOINT_LEN_4:
+		info->ctrl.len = ARM_BREAKPOINT_LEN_4;
+		break;
+	case HW_BREAKPOINT_LEN_8:
+		info->ctrl.len = ARM_BREAKPOINT_LEN_8;
+		if ((info->ctrl.type != ARM_BREAKPOINT_EXECUTE)
+			&& max_watchpoint_len >= 8)
+			break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Address */
+	info->address = bp->attr.bp_addr;
+
+	/* Privilege */
+	info->ctrl.privilege = ARM_BREAKPOINT_USER;
+	if (arch_check_bp_in_kernelspace(bp) && !bp_is_single_step(bp))
+		info->ctrl.privilege |= ARM_BREAKPOINT_PRIV;
+
+	/* Enabled? */
+	info->ctrl.enabled = !bp->attr.disabled;
+
+	/* Mismatch */
+	info->ctrl.mismatch = 0;
+
+	return 0;
+}
+
+/*
+ * Validate the arch-specific HW Breakpoint register settings.
+ */
+int arch_validate_hwbkpt_settings(struct perf_event *bp)
+{
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+	int ret = 0;
+	u32 bytelen, max_len, offset, alignment_mask = 0x3;
+
+	/* Build the arch_hw_breakpoint. */
+	ret = arch_build_bp_info(bp);
+	if (ret)
+		goto out;
+
+	/* Check address alignment. */
+	if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
+		alignment_mask = 0x7;
+	if (info->address & alignment_mask) {
+		/*
+		 * Try to fix the alignment. This may result in a length
+		 * that is too large, so we must check for that.
+		 */
+		bytelen = get_hbp_len(info->ctrl.len);
+		max_len = info->ctrl.type == ARM_BREAKPOINT_EXECUTE ? 4 :
+				max_watchpoint_len;
+
+		if (max_len >= 8)
+			offset = info->address & 0x7;
+		else
+			offset = info->address & 0x3;
+
+		if (bytelen > (1 << ((max_len - (offset + 1)) >> 1))) {
+			ret = -EFBIG;
+			goto out;
+		}
+
+		info->ctrl.len <<= offset;
+		info->address &= ~offset;
+
+		pr_debug("breakpoint alignment fixup: length = 0x%x, "
+			"address = 0x%x\n", info->ctrl.len, info->address);
+	}
+
+	/*
+	 * Currently we rely on an overflow handler to take
+	 * care of single-stepping the breakpoint when it fires.
+	 * In the case of userspace breakpoints on a core with V7 debug,
+	 * we can use the mismatch feature as a poor-man's hardware single-step.
+	 */
+	if (WARN_ONCE(!bp->overflow_handler &&
+		(arch_check_bp_in_kernelspace(bp) || !core_has_mismatch_bps()),
+			"overflow handler required but none found")) {
+		ret = -EINVAL;
+		goto out;
+	}
+out:
+	return ret;
+}
+
+static void update_mismatch_flag(int idx, int flag)
+{
+	struct perf_event *bp = __get_cpu_var(bp_on_reg[idx]);
+	struct arch_hw_breakpoint *info;
+
+	if (bp == NULL)
+		return;
+
+	info = counter_arch_bp(bp);
+
+	/* Update the mismatch field to enter/exit `single-step' mode */
+	if (!bp->overflow_handler && info->ctrl.mismatch != flag) {
+		info->ctrl.mismatch = flag;
+		write_wb_reg(ARM_BASE_BCR + idx, encode_ctrl_reg(info->ctrl) | 0x1);
+	}
+}
+
+static void watchpoint_handler(unsigned long unknown, struct pt_regs *regs)
+{
+	int i;
+	struct perf_event *bp, **slots = __get_cpu_var(wp_on_reg);
+	struct arch_hw_breakpoint *info;
+	struct perf_event_attr attr;
+
+	/* Without a disassembler, we can only handle 1 watchpoint. */
+	BUG_ON(core_num_wrps > 1);
+
+	hw_breakpoint_init(&attr);
+	attr.bp_addr	= regs->ARM_pc & ~0x3;
+	attr.bp_len	= HW_BREAKPOINT_LEN_4;
+	attr.bp_type	= HW_BREAKPOINT_X;
+
+	for (i = 0; i < core_num_wrps; ++i) {
+		rcu_read_lock();
+
+		if (slots[i] == NULL) {
+			rcu_read_unlock();
+			continue;
+		}
+
+		/*
+		 * The DFAR is an unknown value. Since we only allow a
+		 * single watchpoint, we can set the trigger to the lowest
+		 * possible faulting address.
+		 */
+		info = counter_arch_bp(slots[i]);
+		info->trigger = slots[i]->attr.bp_addr;
+		pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
+		perf_bp_event(slots[i], regs);
+
+		/*
+		 * If no overflow handler is present, insert a temporary
+		 * mismatch breakpoint so we can single-step over the
+		 * watchpoint trigger.
+		 */
+		if (!slots[i]->overflow_handler) {
+			bp = register_user_hw_breakpoint(&attr,
+							 wp_single_step_handler,
+							 current);
+			counter_arch_bp(bp)->suspended_wp = slots[i];
+			perf_event_disable(slots[i]);
+		}
+
+		rcu_read_unlock();
+	}
+}
+
+static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
+{
+	int i;
+	int mismatch;
+	u32 ctrl_reg, val, addr;
+	struct perf_event *bp, **slots = __get_cpu_var(bp_on_reg);
+	struct arch_hw_breakpoint *info;
+	struct arch_hw_breakpoint_ctrl ctrl;
+
+	/* The exception entry code places the amended lr in the PC. */
+	addr = regs->ARM_pc;
+
+	for (i = 0; i < core_num_brps; ++i) {
+		rcu_read_lock();
+
+		bp = slots[i];
+
+		if (bp == NULL) {
+			rcu_read_unlock();
+			continue;
+		}
+
+		mismatch = 0;
+
+		/* Check if the breakpoint value matches. */
+		val = read_wb_reg(ARM_BASE_BVR + i);
+		if (val != (addr & ~0x3))
+			goto unlock;
+
+		/* Possible match, check the byte address select to confirm. */
+		ctrl_reg = read_wb_reg(ARM_BASE_BCR + i);
+		decode_ctrl_reg(ctrl_reg, &ctrl);
+		if ((1 << (addr & 0x3)) & ctrl.len) {
+			mismatch = 1;
+			info = counter_arch_bp(bp);
+			info->trigger = addr;
+		}
+
+unlock:
+		if ((mismatch && !info->ctrl.mismatch) || bp_is_single_step(bp)) {
+			pr_debug("breakpoint fired: address = 0x%x\n", addr);
+			perf_bp_event(bp, regs);
+		}
+
+		update_mismatch_flag(i, mismatch);
+		rcu_read_unlock();
+	}
+}
+
+/*
+ * Called from either the Data Abort Handler [watchpoint] or the
+ * Prefetch Abort Handler [breakpoint].
+ */
+static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
+				 struct pt_regs *regs)
+{
+	int ret = 1; /* Unhandled fault. */
+	u32 dscr;
+
+	/* We only handle watchpoints and hardware breakpoints. */
+	ARM_DBG_READ(c1, 0, dscr);
+
+	/* Perform perf callbacks. */
+	switch (ARM_DSCR_MOE(dscr)) {
+	case ARM_ENTRY_BREAKPOINT:
+		breakpoint_handler(addr, regs);
+		break;
+	case ARM_ENTRY_ASYNC_WATCHPOINT:
+		WARN_ON("Asynchronous watchpoint exception taken. "
+			"Debugging results may be unreliable");
+	case ARM_ENTRY_SYNC_WATCHPOINT:
+		watchpoint_handler(addr, regs);
+		break;
+	default:
+		goto out;
+	}
+
+	ret = 0;
+out:
+	return ret;
+}
+
+/*
+ * One-time initialisation.
+ */
+static void __init reset_ctrl_regs(void *unused)
+{
+	int i;
+
+	if (enable_monitor_mode())
+		return;
+
+	for (i = 0; i < core_num_brps; ++i) {
+		write_wb_reg(ARM_BASE_BCR + i, 0UL);
+		write_wb_reg(ARM_BASE_BVR + i, 0UL);
+	}
+
+	for (i = 0; i < core_num_wrps; ++i) {
+		write_wb_reg(ARM_BASE_WCR + i, 0UL);
+		write_wb_reg(ARM_BASE_WVR + i, 0UL);
+	}
+}
+
+static int __init arch_hw_breakpoint_init(void)
+{
+	int ret = 0;
+	u32 dscr;
+
+	debug_arch = get_debug_arch();
+
+	if (debug_arch > ARM_DEBUG_ARCH_V7_ECP14) {
+		pr_info("debug architecture 0x%x unsupported.\n", debug_arch);
+		ret = -ENODEV;
+		goto out;
+	}
+
+	/* Determine how many BRPs/WRPs are available. */
+	core_num_brps = get_num_brps();
+	core_num_wrps = get_num_wrps();
+
+	pr_info("found %d breakpoint and %d watchpoint registers.\n",
+		core_num_brps, core_num_wrps);
+
+	if (core_has_mismatch_bps())
+		pr_info("1 breakpoint reserved for watchpoint single-step.\n");
+
+	ARM_DBG_READ(c1, 0, dscr);
+	if (dscr & ARM_DSCR_HDBGEN) {
+		pr_warning("halting debug mode enabled. Assuming maximum "
+			"watchpoint size of 4 bytes.");
+	} else {
+		/* Work out the maximum supported watchpoint length. */
+		max_watchpoint_len = get_max_wp_len();
+		pr_info("maximum watchpoint size is %u bytes.\n",
+			max_watchpoint_len);
+
+		/*
+		 * Reset the breakpoint resources. We assume that a halting
+		 * debugger will leave the world in a nice state for us.
+		 */
+		smp_call_function(reset_ctrl_regs, NULL, 1);
+		reset_ctrl_regs(NULL);
+	}
+
+	/* Register debug fault handler. */
+	hook_fault_code(2, hw_breakpoint_pending, SIGTRAP, TRAP_HWBKPT,
+			"watchpoint debug exception");
+	hook_ifault_code(2, hw_breakpoint_pending, SIGTRAP, TRAP_HWBKPT,
+			"breakpoint debug exception");
+
+out:
+	return ret;
+}
+arch_initcall(arch_hw_breakpoint_init);
+
+void hw_breakpoint_pmu_read(struct perf_event *bp)
+{
+}
+
+/*
+ * Dummy function to register with die_notifier.
+ */
+int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
+					unsigned long val, void *data)
+{
+	return NOTIFY_DONE;
+}
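This file supplies the ARM back-end for the generic perf hw_breakpoint API, so in-kernel users go through the usual entry points. Below is a hedged sketch of a kernel-side consumer (the watched symbol and messages are made up; note that on this implementation a kernel-space breakpoint must supply an overflow handler, as arch_validate_hwbkpt_settings() above enforces):

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/kallsyms.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>

static struct perf_event * __percpu *wide_bp;

/* 2.6.36-era overflow handler signature, as used elsewhere in this patch */
static void wide_bp_handler(struct perf_event *bp, int unused,
			    struct perf_sample_data *data,
			    struct pt_regs *regs)
{
	pr_info("watched variable touched\n");
	dump_stack();
}

static int __init wide_bp_init(void)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);
	attr.bp_addr = kallsyms_lookup_name("pid_max");	/* example target */
	attr.bp_len  = HW_BREAKPOINT_LEN_4;		/* word-sized, aligned */
	attr.bp_type = HW_BREAKPOINT_W;

	wide_bp = register_wide_hw_breakpoint(&attr, wide_bp_handler);
	if (IS_ERR((void __force *)wide_bp))
		return PTR_ERR((void __force *)wide_bp);
	return 0;
}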
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 6b4605893f1e..d9bd786ce23d 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -69,20 +69,31 @@ int module_frob_arch_sections(Elf_Ehdr *hdr,
 {
 #ifdef CONFIG_ARM_UNWIND
 	Elf_Shdr *s, *sechdrs_end = sechdrs + hdr->e_shnum;
+	struct arm_unwind_mapping *maps = mod->arch.map;
 
 	for (s = sechdrs; s < sechdrs_end; s++) {
-		if (strcmp(".ARM.exidx.init.text", secstrings + s->sh_name) == 0)
-			mod->arch.unw_sec_init = s;
-		else if (strcmp(".ARM.exidx.devinit.text", secstrings + s->sh_name) == 0)
-			mod->arch.unw_sec_devinit = s;
-		else if (strcmp(".ARM.exidx", secstrings + s->sh_name) == 0)
-			mod->arch.unw_sec_core = s;
-		else if (strcmp(".init.text", secstrings + s->sh_name) == 0)
-			mod->arch.sec_init_text = s;
-		else if (strcmp(".devinit.text", secstrings + s->sh_name) == 0)
-			mod->arch.sec_devinit_text = s;
-		else if (strcmp(".text", secstrings + s->sh_name) == 0)
-			mod->arch.sec_core_text = s;
+		char const *secname = secstrings + s->sh_name;
+
+		if (strcmp(".ARM.exidx.init.text", secname) == 0)
+			maps[ARM_SEC_INIT].unw_sec = s;
+		else if (strcmp(".ARM.exidx.devinit.text", secname) == 0)
+			maps[ARM_SEC_DEVINIT].unw_sec = s;
+		else if (strcmp(".ARM.exidx", secname) == 0)
+			maps[ARM_SEC_CORE].unw_sec = s;
+		else if (strcmp(".ARM.exidx.exit.text", secname) == 0)
+			maps[ARM_SEC_EXIT].unw_sec = s;
+		else if (strcmp(".ARM.exidx.devexit.text", secname) == 0)
+			maps[ARM_SEC_DEVEXIT].unw_sec = s;
+		else if (strcmp(".init.text", secname) == 0)
+			maps[ARM_SEC_INIT].sec_text = s;
+		else if (strcmp(".devinit.text", secname) == 0)
+			maps[ARM_SEC_DEVINIT].sec_text = s;
+		else if (strcmp(".text", secname) == 0)
+			maps[ARM_SEC_CORE].sec_text = s;
+		else if (strcmp(".exit.text", secname) == 0)
+			maps[ARM_SEC_EXIT].sec_text = s;
+		else if (strcmp(".devexit.text", secname) == 0)
+			maps[ARM_SEC_DEVEXIT].sec_text = s;
 	}
 #endif
 	return 0;
@@ -292,31 +303,22 @@ apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
 #ifdef CONFIG_ARM_UNWIND
 static void register_unwind_tables(struct module *mod)
 {
-	if (mod->arch.unw_sec_init && mod->arch.sec_init_text)
-		mod->arch.unwind_init =
-			unwind_table_add(mod->arch.unw_sec_init->sh_addr,
-					 mod->arch.unw_sec_init->sh_size,
-					 mod->arch.sec_init_text->sh_addr,
-					 mod->arch.sec_init_text->sh_size);
-	if (mod->arch.unw_sec_devinit && mod->arch.sec_devinit_text)
-		mod->arch.unwind_devinit =
-			unwind_table_add(mod->arch.unw_sec_devinit->sh_addr,
-					 mod->arch.unw_sec_devinit->sh_size,
-					 mod->arch.sec_devinit_text->sh_addr,
-					 mod->arch.sec_devinit_text->sh_size);
-	if (mod->arch.unw_sec_core && mod->arch.sec_core_text)
-		mod->arch.unwind_core =
-			unwind_table_add(mod->arch.unw_sec_core->sh_addr,
-					 mod->arch.unw_sec_core->sh_size,
-					 mod->arch.sec_core_text->sh_addr,
-					 mod->arch.sec_core_text->sh_size);
+	int i;
+	for (i = 0; i < ARM_SEC_MAX; ++i) {
+		struct arm_unwind_mapping *map = &mod->arch.map[i];
+		if (map->unw_sec && map->sec_text)
+			map->unwind = unwind_table_add(map->unw_sec->sh_addr,
+						       map->unw_sec->sh_size,
+						       map->sec_text->sh_addr,
+						       map->sec_text->sh_size);
+	}
 }
 
 static void unregister_unwind_tables(struct module *mod)
 {
-	unwind_table_del(mod->arch.unwind_init);
-	unwind_table_del(mod->arch.unwind_devinit);
-	unwind_table_del(mod->arch.unwind_core);
+	int i = ARM_SEC_MAX;
+	while (--i >= 0)
+		unwind_table_del(mod->arch.map[i].unwind);
 }
 #else
 static inline void register_unwind_tables(struct module *mod) { }
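The loop and table above depend on struct mod_arch_specific growing an array of per-section mappings; that header change is outside this diff, but from the usage here it presumably has this shape (an inferred sketch, not copied from the header):

/* sketch of the arch/arm/include/asm/module.h side of this change */
enum {
	ARM_SEC_INIT,
	ARM_SEC_DEVINIT,
	ARM_SEC_CORE,
	ARM_SEC_EXIT,
	ARM_SEC_DEVEXIT,
	ARM_SEC_MAX,
};

struct arm_unwind_mapping {
	Elf_Shdr *unw_sec;		/* .ARM.exidx* section */
	Elf_Shdr *sec_text;		/* the text section it covers */
	struct unwind_table *unwind;	/* handle from unwind_table_add() */
};

struct mod_arch_specific {
	struct arm_unwind_mapping map[ARM_SEC_MAX];
};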
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 417c392ddf1c..ecbb0288e5dd 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -319,8 +319,8 @@ validate_event(struct cpu_hw_events *cpuc,
 {
 	struct hw_perf_event fake_event = event->hw;
 
-	if (event->pmu && event->pmu != &pmu)
-		return 0;
+	if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
+		return 1;
 
 	return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
 }
@@ -1041,8 +1041,8 @@ armv6pmu_handle_irq(int irq_num,
 	/*
 	 * Handle the pending perf events.
 	 *
-	 * Note: this call *must* be run with interrupts enabled. For
-	 * platforms that can have the PMU interrupts raised as a PMI, this
+	 * Note: this call *must* be run with interrupts disabled. For
+	 * platforms that can have the PMU interrupts raised as an NMI, this
 	 * will not work.
 	 */
 	perf_event_do_pending();
@@ -2017,8 +2017,8 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 	/*
 	 * Handle the pending perf events.
 	 *
-	 * Note: this call *must* be run with interrupts enabled. For
-	 * platforms that can have the PMU interrupts raised as a PMI, this
+	 * Note: this call *must* be run with interrupts disabled. For
+	 * platforms that can have the PMU interrupts raised as an NMI, this
 	 * will not work.
 	 */
 	perf_event_do_pending();
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 401e38be1f78..3af34bf4f4df 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -29,6 +29,7 @@
 #include <linux/utsname.h>
 #include <linux/uaccess.h>
 #include <linux/random.h>
+#include <linux/hw_breakpoint.h>
 
 #include <asm/cacheflush.h>
 #include <asm/leds.h>
@@ -135,6 +136,25 @@ EXPORT_SYMBOL(pm_power_off);
 void (*arm_pm_restart)(char str, const char *cmd) = arm_machine_restart;
 EXPORT_SYMBOL_GPL(arm_pm_restart);
 
+static void do_nothing(void *unused)
+{
+}
+
+/*
+ * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
+ * pm_idle and update to new pm_idle value. Required while changing pm_idle
+ * handler on SMP systems.
+ *
+ * Caller must have changed pm_idle to the new value before the call. Old
+ * pm_idle value will not be used by any CPU after the return of this function.
+ */
+void cpu_idle_wait(void)
+{
+	smp_mb();
+	/* kick all the CPUs so that they exit out of pm_idle */
+	smp_call_function(do_nothing, NULL, 1);
+}
+EXPORT_SYMBOL_GPL(cpu_idle_wait);
 
 /*
  * This is our default idle handler. We need to disable
@@ -317,6 +337,8 @@ void flush_thread(void)
 	struct thread_info *thread = current_thread_info();
 	struct task_struct *tsk = current;
 
+	flush_ptrace_hw_breakpoint(tsk);
+
 	memset(thread->used_cp, 0, sizeof(thread->used_cp));
 	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
 	memset(&thread->fpstate, 0, sizeof(union fp_state));
@@ -345,6 +367,8 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
 	thread->cpu_context.sp = (unsigned long)childregs;
 	thread->cpu_context.pc = (unsigned long)ret_from_fork;
 
+	clear_ptrace_hw_breakpoint(p);
+
 	if (clone_flags & CLONE_SETTLS)
 		thread->tp_value = regs->ARM_r3;
 
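cpu_idle_wait() gives pm_idle writers a rendezvous: publish the new handler, then force every CPU through one trip out of the old one before it can be freed or unloaded. A hypothetical caller (names invented for illustration):

/* illustrative only: swap the idle handler safely on SMP */
static void install_idle_handler(void (*new_idle)(void))
{
	pm_idle = new_idle;	/* publish the new handler... */
	cpu_idle_wait();	/* ...and wait until no CPU runs the old one */
}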
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index f99d489822d5..e0cb6370ed14 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -19,6 +19,8 @@
 #include <linux/init.h>
 #include <linux/signal.h>
 #include <linux/uaccess.h>
+#include <linux/perf_event.h>
+#include <linux/hw_breakpoint.h>
 
 #include <asm/pgtable.h>
 #include <asm/system.h>
@@ -847,6 +849,232 @@ static int ptrace_setvfpregs(struct task_struct *tsk, void __user *data)
 }
 #endif
 
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+/*
+ * Convert a virtual register number into an index for a thread_info
+ * breakpoint array. Breakpoints are identified using positive numbers
+ * whilst watchpoints are negative. The registers are laid out as pairs
+ * of (address, control), each pair mapping to a unique hw_breakpoint struct.
+ * Register 0 is reserved for describing resource information.
+ */
+static int ptrace_hbp_num_to_idx(long num)
+{
+	if (num < 0)
+		num = (ARM_MAX_BRP << 1) - num;
+	return (num - 1) >> 1;
+}
+
+/*
+ * Returns the virtual register number for the address of the
+ * breakpoint at index idx.
+ */
+static long ptrace_hbp_idx_to_num(int idx)
+{
+	long mid = ARM_MAX_BRP << 1;
+	long num = (idx << 1) + 1;
+	return num > mid ? mid - num : num;
+}
+
+/*
+ * Handle hitting a HW-breakpoint.
+ */
+static void ptrace_hbptriggered(struct perf_event *bp, int unused,
+				struct perf_sample_data *data,
+				struct pt_regs *regs)
+{
+	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
+	long num;
+	int i;
+	siginfo_t info;
+
+	for (i = 0; i < ARM_MAX_HBP_SLOTS; ++i)
+		if (current->thread.debug.hbp[i] == bp)
+			break;
+
+	num = (i == ARM_MAX_HBP_SLOTS) ? 0 : ptrace_hbp_idx_to_num(i);
+
+	info.si_signo	= SIGTRAP;
+	info.si_errno	= (int)num;
+	info.si_code	= TRAP_HWBKPT;
+	info.si_addr	= (void __user *)(bkpt->trigger);
+
+	force_sig_info(SIGTRAP, &info, current);
+}
+
+/*
+ * Set ptrace breakpoint pointers to zero for this task.
+ * This is required in order to prevent child processes from unregistering
+ * breakpoints held by their parent.
+ */
+void clear_ptrace_hw_breakpoint(struct task_struct *tsk)
+{
+	memset(tsk->thread.debug.hbp, 0, sizeof(tsk->thread.debug.hbp));
+}
+
+/*
+ * Unregister breakpoints from this task and reset the pointers in
+ * the thread_struct.
+ */
+void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
+{
+	int i;
+	struct thread_struct *t = &tsk->thread;
+
+	for (i = 0; i < ARM_MAX_HBP_SLOTS; i++) {
+		if (t->debug.hbp[i]) {
+			unregister_hw_breakpoint(t->debug.hbp[i]);
+			t->debug.hbp[i] = NULL;
+		}
+	}
+}
+
+static u32 ptrace_get_hbp_resource_info(void)
+{
+	u8 num_brps, num_wrps, debug_arch, wp_len;
+	u32 reg = 0;
+
+	num_brps	= hw_breakpoint_slots(TYPE_INST);
+	num_wrps	= hw_breakpoint_slots(TYPE_DATA);
+	debug_arch	= arch_get_debug_arch();
+	wp_len		= arch_get_max_wp_len();
+
+	reg		|= debug_arch;
+	reg		<<= 8;
+	reg		|= wp_len;
+	reg		<<= 8;
+	reg		|= num_wrps;
+	reg		<<= 8;
+	reg		|= num_brps;
+
+	return reg;
+}
+
+static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type)
+{
+	struct perf_event_attr attr;
+
+	ptrace_breakpoint_init(&attr);
+
+	/* Initialise fields to sane defaults. */
+	attr.bp_addr	= 0;
+	attr.bp_len	= HW_BREAKPOINT_LEN_4;
+	attr.bp_type	= type;
+	attr.disabled	= 1;
+
+	return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, tsk);
+}
+
+static int ptrace_gethbpregs(struct task_struct *tsk, long num,
+			     unsigned long __user *data)
+{
+	u32 reg;
+	int idx, ret = 0;
+	struct perf_event *bp;
+	struct arch_hw_breakpoint_ctrl arch_ctrl;
+
+	if (num == 0) {
+		reg = ptrace_get_hbp_resource_info();
+	} else {
+		idx = ptrace_hbp_num_to_idx(num);
+		if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		bp = tsk->thread.debug.hbp[idx];
+		if (!bp) {
+			reg = 0;
+			goto put;
+		}
+
+		arch_ctrl = counter_arch_bp(bp)->ctrl;
+
+		/*
+		 * Fix up the len because we may have adjusted it
+		 * to compensate for an unaligned address.
+		 */
+		while (!(arch_ctrl.len & 0x1))
+			arch_ctrl.len >>= 1;
+
+		if (idx & 0x1)
+			reg = encode_ctrl_reg(arch_ctrl);
+		else
+			reg = bp->attr.bp_addr;
+	}
+
+put:
+	if (put_user(reg, data))
+		ret = -EFAULT;
+
+out:
+	return ret;
+}
+
+static int ptrace_sethbpregs(struct task_struct *tsk, long num,
+			     unsigned long __user *data)
+{
+	int idx, gen_len, gen_type, implied_type, ret = 0;
+	u32 user_val;
+	struct perf_event *bp;
+	struct arch_hw_breakpoint_ctrl ctrl;
+	struct perf_event_attr attr;
+
+	if (num == 0)
+		goto out;
+	else if (num < 0)
+		implied_type = HW_BREAKPOINT_RW;
+	else
+		implied_type = HW_BREAKPOINT_X;
+
+	idx = ptrace_hbp_num_to_idx(num);
+	if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (get_user(user_val, data)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	bp = tsk->thread.debug.hbp[idx];
+	if (!bp) {
+		bp = ptrace_hbp_create(tsk, implied_type);
+		if (IS_ERR(bp)) {
+			ret = PTR_ERR(bp);
+			goto out;
+		}
+		tsk->thread.debug.hbp[idx] = bp;
+	}
+
+	attr = bp->attr;
+
+	if (num & 0x1) {
+		/* Address */
+		attr.bp_addr	= user_val;
+	} else {
+		/* Control */
+		decode_ctrl_reg(user_val, &ctrl);
+		ret = arch_bp_generic_fields(ctrl, &gen_len, &gen_type);
+		if (ret)
+			goto out;
+
+		if ((gen_type & implied_type) != gen_type) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		attr.bp_len	= gen_len;
+		attr.bp_type	= gen_type;
+		attr.disabled	= !ctrl.enabled;
+	}
+
+	ret = modify_user_hw_breakpoint(bp, &attr);
+out:
+	return ret;
+}
+#endif
+
 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 {
 	int ret;
@@ -916,6 +1144,17 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 		break;
 #endif
 
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+	case PTRACE_GETHBPREGS:
+		ret = ptrace_gethbpregs(child, addr,
+					(unsigned long __user *)data);
+		break;
+	case PTRACE_SETHBPREGS:
+		ret = ptrace_sethbpregs(child, addr,
+					(unsigned long __user *)data);
+		break;
+#endif
+
 	default:
 		ret = ptrace_request(child, request, addr, data);
 		break;
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index d5231ae7355a..336f14e0e5c2 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -36,6 +36,7 @@
 #include <asm/procinfo.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
+#include <asm/smp_plat.h>
 #include <asm/mach-types.h>
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
@@ -238,6 +239,35 @@ int cpu_architecture(void)
 	return cpu_arch;
 }
 
+static int cpu_has_aliasing_icache(unsigned int arch)
+{
+	int aliasing_icache;
+	unsigned int id_reg, num_sets, line_size;
+
+	/* arch specifies the register format */
+	switch (arch) {
+	case CPU_ARCH_ARMv7:
+		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
+		    : /* No output operands */
+		    : "r" (1));
+		isb();
+		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
+		    : "=r" (id_reg));
+		line_size = 4 << ((id_reg & 0x7) + 2);
+		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
+		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
+		break;
+	case CPU_ARCH_ARMv6:
+		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
+		break;
+	default:
+		/* I-cache aliases will be handled by D-cache aliasing code */
+		aliasing_icache = 0;
+	}
+
+	return aliasing_icache;
+}
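The ARMv7 arithmetic deserves a worked example: CCSIDR encodes
log2(words per line) - 2 in bits [2:0] and the set count minus one in
bits [27:13], so line_size * num_sets is the size in bytes of one cache
way. If a way exceeds a page, some virtual index bits lie above the page
offset and the I-cache can alias. A runnable userspace model of the same
computation; the sample register value is invented for illustration,
whereas on hardware id_reg comes from the CP15 read above:

	#include <stdio.h>

	#define PAGE_SIZE 4096

	static int icache_way_exceeds_page(unsigned int id_reg)
	{
		unsigned int line_size = 4 << ((id_reg & 0x7) + 2); /* bytes */
		unsigned int num_sets = ((id_reg >> 13) & 0x7fff) + 1;

		/* One way spans line_size * num_sets bytes. */
		return (line_size * num_sets) > PAGE_SIZE;
	}

	int main(void)
	{
		/* LineSize field = 1 (32-byte lines), 256 sets: 8 KiB way */
		unsigned int sample = (255u << 13) | 1u;

		printf("aliasing: %d\n", icache_way_exceeds_page(sample));
		return 0;
	}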
+
 static void __init cacheid_init(void)
 {
 	unsigned int cachetype = read_cpuid_cachetype();
@@ -249,10 +279,15 @@ static void __init cacheid_init(void)
 			cacheid = CACHEID_VIPT_NONALIASING;
 			if ((cachetype & (3 << 14)) == 1 << 14)
 				cacheid |= CACHEID_ASID_TAGGED;
-		} else if (cachetype & (1 << 23))
+			else if (cpu_has_aliasing_icache(CPU_ARCH_ARMv7))
+				cacheid |= CACHEID_VIPT_I_ALIASING;
+		} else if (cachetype & (1 << 23)) {
 			cacheid = CACHEID_VIPT_ALIASING;
-		else
+		} else {
 			cacheid = CACHEID_VIPT_NONALIASING;
+			if (cpu_has_aliasing_icache(CPU_ARCH_ARMv6))
+				cacheid |= CACHEID_VIPT_I_ALIASING;
+		}
 	} else {
 		cacheid = CACHEID_VIVT;
 	}
@@ -263,7 +298,7 @@ static void __init cacheid_init(void)
 		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown",
 		cache_is_vivt() ? "VIVT" :
 		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
-		cache_is_vipt_aliasing() ? "VIPT aliasing" :
+		icache_is_vipt_aliasing() ? "VIPT aliasing" :
 		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
 }
 
@@ -490,7 +525,7 @@ request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
 
 	kernel_code.start = virt_to_phys(_text);
 	kernel_code.end = virt_to_phys(_etext - 1);
-	kernel_data.start = virt_to_phys(_data);
+	kernel_data.start = virt_to_phys(_sdata);
 	kernel_data.end = virt_to_phys(_end - 1);
 
 	for (i = 0; i < mi->nr_banks; i++) {
@@ -825,7 +860,8 @@ void __init setup_arch(char **cmdline_p)
 	request_standard_resources(&meminfo, mdesc);
 
 #ifdef CONFIG_SMP
-	smp_init_cpus();
+	if (is_smp())
+		smp_init_cpus();
 #endif
 	reserve_crashkernel();
 
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 40dc74f2b27f..32e16da5cbce 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -567,7 +567,8 @@ void smp_send_stop(void)
 {
 	cpumask_t mask = cpu_online_map;
 	cpu_clear(smp_processor_id(), mask);
-	send_ipi_message(&mask, IPI_CPU_STOP);
+	if (!cpus_empty(mask))
+		send_ipi_message(&mask, IPI_CPU_STOP);
 }
 
 /*
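The added check matters when an SMP kernel boots on a machine with a
single CPU: after the current processor clears itself, the mask is empty
and there is no one left to signal. A tiny userspace model of the guard,
with a plain bitmask standing in for cpumask_t:

	#include <stdio.h>

	int main(void)
	{
		unsigned long online = 1UL << 0;	/* only CPU0 online */
		unsigned long mask = online & ~(1UL << 0); /* cpu_clear(self) */

		if (mask)				/* !cpus_empty(mask) */
			printf("send IPI_CPU_STOP to %#lx\n", mask);
		else
			printf("mask empty, no IPI sent\n");
		return 0;
	}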
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index dd81a918c106..2a161765f6d5 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -146,6 +146,8 @@ static struct unwind_idx *unwind_find_idx(unsigned long addr)
 		    addr < table->end_addr) {
 			idx = search_index(addr, table->start,
 					   table->stop - 1);
+			/* Move-to-front to exploit common traces */
+			list_move(&table->list, &unwind_tables);
 			break;
 		}
 	}
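list_move() here is the classic move-to-front heuristic: successive
stack traces tend to land in the same unwind table, so a hit is spliced
to the head of unwind_tables and the next linear search terminates
early. A self-contained sketch of the same idea over a singly-linked
list; the kernel version splices a struct list_head instead:

	#include <stddef.h>

	struct table {
		unsigned long begin_addr, end_addr;
		struct table *next;
	};

	static struct table *find_table(struct table **head, unsigned long addr)
	{
		struct table **link, *t;

		for (link = head; (t = *link) != NULL; link = &t->next) {
			if (addr >= t->begin_addr && addr < t->end_addr) {
				*link = t->next;	/* unlink the hit */
				t->next = *head;	/* splice to the front */
				*head = t;
				return t;
			}
		}
		return NULL;
	}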
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index b16c07914b55..065d35de0e01 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -40,6 +40,11 @@ SECTIONS
 		__tagtable_begin = .;
 		*(.taglist.init)
 		__tagtable_end = .;
+#ifdef CONFIG_SMP_ON_UP
+		__smpalt_begin = .;
+		*(.alt.smp.init)
+		__smpalt_end = .;
+#endif
 
 	INIT_SETUP(16)
 
@@ -104,8 +109,6 @@ SECTIONS
 
 	RO_DATA(PAGE_SIZE)
 
-	_etext = .;			/* End of text and rodata section */
-
 #ifdef CONFIG_ARM_UNWIND
 	/*
 	 * Stack unwinding tables
@@ -123,6 +126,8 @@ SECTIONS
 	}
 #endif
 
+	_etext = .;			/* End of text and rodata section */
+
 #ifdef CONFIG_XIP_KERNEL
 	__data_loc = ALIGN(4);		/* location in binary */
 	. = PAGE_OFFSET + TEXT_OFFSET;
@@ -237,6 +242,12 @@ SECTIONS
 
 	/* Default discards */
 	DISCARDS
+
+#ifndef CONFIG_SMP_ON_UP
+	/DISCARD/ : {
+		*(.alt.smp.init)
+	}
+#endif
 }
 
 /*
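These linker changes pair with the ALT_SMP()/ALT_UP_B() sites visible in
the entry-armv.S hunk: each site assembles the SMP flavour of an
instruction in place and emits a fixup record into .alt.smp.init. On a
CONFIG_SMP_ON_UP kernel booted on uniprocessor hardware, early boot code
walks __smpalt_begin..__smpalt_end and rewrites each site with its UP
replacement; kernels built without the option discard the records
outright via the /DISCARD/ rule. A hedged C model of that fixup walk:
the record layout and names below are illustrative, since the real table
is emitted by assembler macros and patched from head.S before the normal
C environment is up.

	/* Illustrative record layout; the real one lives in assembler. */
	struct smpalt_entry {
		unsigned long insn_addr;	/* SMP instruction to patch */
		unsigned long up_insn;		/* UP replacement encoding */
	};

	extern struct smpalt_entry __smpalt_begin[], __smpalt_end[];

	static void fixup_smp_on_up(void)
	{
		struct smpalt_entry *e;

		for (e = __smpalt_begin; e < __smpalt_end; e++)
			*(unsigned long *)e->insn_addr = e->up_insn;

		/* Cache maintenance would follow on real hardware, so the
		 * patched instructions are visible to instruction fetch. */
	}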