author     Russell King <rmk+kernel@arm.linux.org.uk>  2010-11-26 05:28:11 -0500
committer  Russell King <rmk+kernel@arm.linux.org.uk>  2010-11-26 05:28:11 -0500
commit     83cf1eecfe9afee99d6b86f963187acd414c019d (patch)
tree       4381bdcc45d81c8c3713630cae9660d770885d96
parent     f1690d17d2c7afa2a2079e3c91eb2bca8c1c5ecd (diff)
parent     0e341af835fdf553820a1fa98341b93ab32ce466 (diff)
Merge branch 'ftrace' of git://github.com/rabinv/linux-2.6 into devel-stable
Diffstat:
-rw-r--r--  arch/arm/Kconfig                       |   1
-rw-r--r--  arch/arm/Kconfig.debug                 |   2
-rw-r--r--  arch/arm/include/asm/system.h          |   5
-rw-r--r--  arch/arm/include/asm/traps.h           |  23
-rw-r--r--  arch/arm/kernel/Makefile               |   3
-rw-r--r--  arch/arm/kernel/entry-common.S         | 202
-rw-r--r--  arch/arm/kernel/ftrace.c               | 103
-rw-r--r--  arch/arm/kernel/irq.c                  |   4
-rw-r--r--  arch/arm/kernel/smp.c                  |   5
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S          |   1
-rw-r--r--  arch/arm/plat-versatile/sched-clock.c  |   1

11 files changed, 273 insertions, 77 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index c8fa1443e2c1..49778bb43782 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -14,6 +14,7 @@ config ARM
         select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
         select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL)
         select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL)
+        select HAVE_FUNCTION_GRAPH_TRACER if (!THUMB2_KERNEL)
         select HAVE_GENERIC_DMA_COHERENT
         select HAVE_KERNEL_GZIP
         select HAVE_KERNEL_LZO
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 2fd0b99afc4b..eac62085f5b2 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -23,7 +23,7 @@ config STRICT_DEVMEM
 config FRAME_POINTER
         bool
         depends on !THUMB2_KERNEL
-        default y if !ARM_UNWIND
+        default y if !ARM_UNWIND || FUNCTION_GRAPH_TRACER
         help
           If you say N here, the resulting kernel will be slightly smaller and
           faster. However, if neither FRAME_POINTER nor ARM_UNWIND are enabled,
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 1120f18a6b17..ec4327a4653d 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -63,6 +63,11 @@
 #include <asm/outercache.h>
 
 #define __exception __attribute__((section(".exception.text")))
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#define __exception_irq_entry __irq_entry
+#else
+#define __exception_irq_entry __exception
+#endif
 
 struct thread_info;
 struct task_struct;
diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h
index 491960bf4260..124475afb007 100644
--- a/arch/arm/include/asm/traps.h
+++ b/arch/arm/include/asm/traps.h
@@ -15,13 +15,32 @@ struct undef_hook {
 void register_undef_hook(struct undef_hook *hook);
 void unregister_undef_hook(struct undef_hook *hook);
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static inline int __in_irqentry_text(unsigned long ptr)
+{
+        extern char __irqentry_text_start[];
+        extern char __irqentry_text_end[];
+
+        return ptr >= (unsigned long)&__irqentry_text_start &&
+               ptr < (unsigned long)&__irqentry_text_end;
+}
+#else
+static inline int __in_irqentry_text(unsigned long ptr)
+{
+        return 0;
+}
+#endif
+
 static inline int in_exception_text(unsigned long ptr)
 {
         extern char __exception_text_start[];
         extern char __exception_text_end[];
+        int in;
+
+        in = ptr >= (unsigned long)&__exception_text_start &&
+             ptr < (unsigned long)&__exception_text_end;
 
-        return ptr >= (unsigned long)&__exception_text_start &&
-               ptr < (unsigned long)&__exception_text_end;
+        return in ? : __in_irqentry_text(ptr);
 }
 
 extern void __init early_trap_init(void);
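
The new __in_irqentry_text() helper extends in_exception_text() so the backtrace code also recognises addresses in the .irqentry.text section (populated via IRQENTRY_TEXT in vmlinux.lds.S further down), where IRQ handlers land once they are annotated __exception_irq_entry. The check itself is just an address-range test against linker-provided symbols; a minimal stand-alone sketch of that pattern, using a hypothetical array in place of the linker symbols, might look like this:

    /* Minimal model of a section-range test such as in_exception_text();
     * fake_irqentry_text merely stands in for the linker symbols
     * __irqentry_text_start/__irqentry_text_end. */
    #include <stdio.h>

    static char fake_irqentry_text[64];

    static int in_fake_irqentry_text(unsigned long ptr)
    {
            return ptr >= (unsigned long)fake_irqentry_text &&
                   ptr <  (unsigned long)(fake_irqentry_text + sizeof(fake_irqentry_text));
    }

    int main(void)
    {
            unsigned long inside  = (unsigned long)fake_irqentry_text + 8;
            unsigned long outside = (unsigned long)fake_irqentry_text + sizeof(fake_irqentry_text);

            printf("inside: %d, outside: %d\n",
                   in_fake_irqentry_text(inside), in_fake_irqentry_text(outside));
            return 0;
    }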
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 5b9b268f4fbb..679851a9f589 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -5,7 +5,7 @@
 CPPFLAGS_vmlinux.lds := -DTEXT_OFFSET=$(TEXT_OFFSET)
 AFLAGS_head.o        := -DTEXT_OFFSET=$(TEXT_OFFSET)
 
-ifdef CONFIG_DYNAMIC_FTRACE
+ifdef CONFIG_FUNCTION_TRACER
 CFLAGS_REMOVE_ftrace.o = -pg
 endif
 
@@ -33,6 +33,7 @@ obj-$(CONFIG_SMP) += smp.o
 obj-$(CONFIG_HAVE_ARM_SCU)      += smp_scu.o
 obj-$(CONFIG_HAVE_ARM_TWD)      += smp_twd.o
 obj-$(CONFIG_DYNAMIC_FTRACE)    += ftrace.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
 obj-$(CONFIG_KEXEC)             += machine_kexec.o relocate_kernel.o
 obj-$(CONFIG_KPROBES)           += kprobes.o kprobes-decode.o
 obj-$(CONFIG_ATAGS_PROC)        += atags.o
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 8bfa98757cd2..aae802ee12f8 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -141,98 +141,170 @@ ENDPROC(ret_from_fork)
 #endif
 #endif
 
-#ifdef CONFIG_DYNAMIC_FTRACE
-ENTRY(__gnu_mcount_nc)
-        mov ip, lr
-        ldmia sp!, {lr}
-        mov pc, ip
-ENDPROC(__gnu_mcount_nc)
+.macro __mcount suffix
+        mcount_enter
+        ldr     r0, =ftrace_trace_function
+        ldr     r2, [r0]
+        adr     r0, .Lftrace_stub
+        cmp     r0, r2
+        bne     1f
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+        ldr     r1, =ftrace_graph_return
+        ldr     r2, [r1]
+        cmp     r0, r2
+        bne     ftrace_graph_caller\suffix
+
+        ldr     r1, =ftrace_graph_entry
+        ldr     r2, [r1]
+        ldr     r0, =ftrace_graph_entry_stub
+        cmp     r0, r2
+        bne     ftrace_graph_caller\suffix
+#endif
 
-ENTRY(ftrace_caller)
-        stmdb sp!, {r0-r3, lr}
-        mov r0, lr
+        mcount_exit
+
+1:      mcount_get_lr   r1              @ lr of instrumented func
+        mov     r0, lr                  @ instrumented function
+        sub     r0, r0, #MCOUNT_INSN_SIZE
+        adr     lr, BSYM(2f)
+        mov     pc, r2
+2:      mcount_exit
+.endm
+
+.macro __ftrace_caller suffix
+        mcount_enter
+
+        mcount_get_lr   r1              @ lr of instrumented func
+        mov     r0, lr                  @ instrumented function
         sub     r0, r0, #MCOUNT_INSN_SIZE
-        ldr r1, [sp, #20]
 
-        .global ftrace_call
-ftrace_call:
+        .globl ftrace_call\suffix
+ftrace_call\suffix:
         bl      ftrace_stub
-        ldmia sp!, {r0-r3, ip, lr}
-        mov pc, ip
-ENDPROC(ftrace_caller)
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+        .globl ftrace_graph_call\suffix
+ftrace_graph_call\suffix:
+        mov     r0, r0
+#endif
+
+        mcount_exit
+.endm
+
+.macro __ftrace_graph_caller
+        sub     r0, fp, #4              @ &lr of instrumented routine (&parent)
+#ifdef CONFIG_DYNAMIC_FTRACE
+        @ called from __ftrace_caller, saved in mcount_enter
+        ldr     r1, [sp, #16]           @ instrumented routine (func)
+#else
+        @ called from __mcount, untouched in lr
+        mov     r1, lr                  @ instrumented routine (func)
+#endif
+        sub     r1, r1, #MCOUNT_INSN_SIZE
+        mov     r2, fp                  @ frame pointer
+        bl      prepare_ftrace_return
+        mcount_exit
+.endm
 
 #ifdef CONFIG_OLD_MCOUNT
+/*
+ * mcount
+ */
+
+.macro mcount_enter
+        stmdb   sp!, {r0-r3, lr}
+.endm
+
+.macro mcount_get_lr reg
+        ldr     \reg, [fp, #-4]
+.endm
+
+.macro mcount_exit
+        ldr     lr, [fp, #-4]
+        ldmia   sp!, {r0-r3, pc}
+.endm
+
 ENTRY(mcount)
+#ifdef CONFIG_DYNAMIC_FTRACE
         stmdb   sp!, {lr}
         ldr     lr, [fp, #-4]
         ldmia   sp!, {pc}
+#else
+        __mcount _old
+#endif
 ENDPROC(mcount)
 
+#ifdef CONFIG_DYNAMIC_FTRACE
 ENTRY(ftrace_caller_old)
-        stmdb sp!, {r0-r3, lr}
-        ldr r1, [fp, #-4]
-        mov r0, lr
-        sub r0, r0, #MCOUNT_INSN_SIZE
-
-        .globl ftrace_call_old
-ftrace_call_old:
-        bl ftrace_stub
-        ldr lr, [fp, #-4]       @ restore lr
-        ldmia sp!, {r0-r3, pc}
+        __ftrace_caller _old
 ENDPROC(ftrace_caller_old)
 #endif
 
-#else
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller_old)
+        __ftrace_graph_caller
+ENDPROC(ftrace_graph_caller_old)
+#endif
 
-ENTRY(__gnu_mcount_nc)
+.purgem mcount_enter
+.purgem mcount_get_lr
+.purgem mcount_exit
+#endif
+
+/*
+ * __gnu_mcount_nc
+ */
+
+.macro mcount_enter
         stmdb   sp!, {r0-r3, lr}
-        ldr r0, =ftrace_trace_function
-        ldr r2, [r0]
-        adr r0, .Lftrace_stub
-        cmp r0, r2
-        bne gnu_trace
+.endm
+
+.macro mcount_get_lr reg
+        ldr     \reg, [sp, #20]
+.endm
+
+.macro mcount_exit
         ldmia   sp!, {r0-r3, ip, lr}
         mov     pc, ip
+.endm
 
-gnu_trace:
-        ldr r1, [sp, #20]       @ lr of instrumented routine
-        mov r0, lr
-        sub r0, r0, #MCOUNT_INSN_SIZE
-        adr lr, BSYM(1f)
-        mov pc, r2
-1:
-        ldmia sp!, {r0-r3, ip, lr}
+ENTRY(__gnu_mcount_nc)
+#ifdef CONFIG_DYNAMIC_FTRACE
+        mov     ip, lr
+        ldmia   sp!, {lr}
         mov     pc, ip
+#else
+        __mcount
+#endif
 ENDPROC(__gnu_mcount_nc)
 
-#ifdef CONFIG_OLD_MCOUNT
-/*
- * This is under an ifdef in order to force link-time errors for people trying
- * to build with !FRAME_POINTER with a GCC which doesn't use the new-style
- * mcount.
- */
-ENTRY(mcount)
-        stmdb sp!, {r0-r3, lr}
-        ldr r0, =ftrace_trace_function
-        ldr r2, [r0]
-        adr r0, ftrace_stub
-        cmp r0, r2
-        bne trace
-        ldr lr, [fp, #-4]       @ restore lr
-        ldmia sp!, {r0-r3, pc}
+#ifdef CONFIG_DYNAMIC_FTRACE
+ENTRY(ftrace_caller)
+        __ftrace_caller
+ENDPROC(ftrace_caller)
+#endif
 
-trace:
-        ldr r1, [fp, #-4]       @ lr of instrumented routine
-        mov r0, lr
-        sub r0, r0, #MCOUNT_INSN_SIZE
-        mov lr, pc
-        mov pc, r2
-        ldr lr, [fp, #-4]       @ restore lr
-        ldmia sp!, {r0-r3, pc}
-ENDPROC(mcount)
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
+        __ftrace_graph_caller
+ENDPROC(ftrace_graph_caller)
 #endif
 
-#endif /* CONFIG_DYNAMIC_FTRACE */
+.purgem mcount_enter
+.purgem mcount_get_lr
+.purgem mcount_exit
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+        .globl return_to_handler
+return_to_handler:
+        stmdb   sp!, {r0-r3}
+        mov     r0, fp                  @ frame pointer
+        bl      ftrace_return_to_handler
+        mov     lr, r0                  @ r0 has real ret addr
+        ldmia   sp!, {r0-r3}
+        mov     pc, lr
+#endif
 
 ENTRY(ftrace_stub)
 .Lftrace_stub:
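
Reading the __mcount macro above is easier with a rough C-level model of its dispatch: call the function tracer only if ftrace_trace_function has been pointed away from ftrace_stub, otherwise fall through to the graph-tracer hooks, otherwise return straight away. The sketch below is illustrative only, with simplified types and stand-in names rather than the kernel's real ftrace API:

    /* Illustrative model of the __mcount dispatch; not real kernel code. */
    typedef void (*trace_func_t)(unsigned long ip, unsigned long parent_ip);

    static void stub(unsigned long ip, unsigned long parent_ip) { }

    static trace_func_t trace_function = stub;   /* models ftrace_trace_function */
    static int graph_hooks_installed;            /* models ftrace_graph_entry/return */

    enum { MCOUNT_INSN_SIZE_MODEL = 4 };         /* one ARM instruction */

    void mcount_model(unsigned long mcount_lr, unsigned long caller_lr)
    {
            if (trace_function != stub) {
                    /* r0: address inside the instrumented function,
                     * r1: the lr it was entered with (its caller) */
                    trace_function(mcount_lr - MCOUNT_INSN_SIZE_MODEL, caller_lr);
            } else if (graph_hooks_installed) {
                    /* bne ftrace_graph_caller -> prepare_ftrace_return() */
            }
            /* otherwise: mcount_exit, i.e. a plain return */
    }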
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index 971ac8c36ea7..c0062ad1e847 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -24,6 +24,7 @@
 #define NOP             0xe8bd4000      /* pop {lr} */
 #endif
 
+#ifdef CONFIG_DYNAMIC_FTRACE
 #ifdef CONFIG_OLD_MCOUNT
 #define OLD_MCOUNT_ADDR ((unsigned long) mcount)
 #define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old)
@@ -59,9 +60,9 @@ static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
 }
 #endif
 
-/* construct a branch (BL) instruction to addr */
 #ifdef CONFIG_THUMB2_KERNEL
-static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
+static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr,
+                                       bool link)
 {
         unsigned long s, j1, j2, i1, i2, imm10, imm11;
         unsigned long first, second;
@@ -83,15 +84,22 @@ static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
         j2 = (!i2) ^ s;
 
         first = 0xf000 | (s << 10) | imm10;
-        second = 0xd000 | (j1 << 13) | (j2 << 11) | imm11;
+        second = 0x9000 | (j1 << 13) | (j2 << 11) | imm11;
+        if (link)
+                second |= 1 << 14;
 
         return (second << 16) | first;
 }
 #else
-static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
+static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr,
+                                       bool link)
 {
+        unsigned long opcode = 0xea000000;
         long offset;
 
+        if (link)
+                opcode |= 1 << 24;
+
         offset = (long)addr - (long)(pc + 8);
         if (unlikely(offset < -33554432 || offset > 33554428)) {
                 /* Can't generate branches that far (from ARM ARM). Ftrace
@@ -103,10 +111,15 @@ static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
 
         offset = (offset >> 2) & 0x00ffffff;
 
-        return 0xeb000000 | offset;
+        return opcode | offset;
 }
 #endif
 
+static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
+{
+        return ftrace_gen_branch(pc, addr, true);
+}
+
 static int ftrace_modify_code(unsigned long pc, unsigned long old,
                               unsigned long new)
 {
@@ -193,3 +206,83 @@ int __init ftrace_dyn_arch_init(void *data)
 
         return 0;
 }
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+                           unsigned long frame_pointer)
+{
+        unsigned long return_hooker = (unsigned long) &return_to_handler;
+        struct ftrace_graph_ent trace;
+        unsigned long old;
+        int err;
+
+        if (unlikely(atomic_read(&current->tracing_graph_pause)))
+                return;
+
+        old = *parent;
+        *parent = return_hooker;
+
+        err = ftrace_push_return_trace(old, self_addr, &trace.depth,
+                                       frame_pointer);
+        if (err == -EBUSY) {
+                *parent = old;
+                return;
+        }
+
+        trace.func = self_addr;
+
+        /* Only trace if the calling function expects to */
+        if (!ftrace_graph_entry(&trace)) {
+                current->curr_ret_stack--;
+                *parent = old;
+        }
+}
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern unsigned long ftrace_graph_call;
+extern unsigned long ftrace_graph_call_old;
+extern void ftrace_graph_caller_old(void);
+
+static int __ftrace_modify_caller(unsigned long *callsite,
+                                  void (*func) (void), bool enable)
+{
+        unsigned long caller_fn = (unsigned long) func;
+        unsigned long pc = (unsigned long) callsite;
+        unsigned long branch = ftrace_gen_branch(pc, caller_fn, false);
+        unsigned long nop = 0xe1a00000; /* mov r0, r0 */
+        unsigned long old = enable ? nop : branch;
+        unsigned long new = enable ? branch : nop;
+
+        return ftrace_modify_code(pc, old, new);
+}
+
+static int ftrace_modify_graph_caller(bool enable)
+{
+        int ret;
+
+        ret = __ftrace_modify_caller(&ftrace_graph_call,
+                                     ftrace_graph_caller,
+                                     enable);
+
+#ifdef CONFIG_OLD_MCOUNT
+        if (!ret)
+                ret = __ftrace_modify_caller(&ftrace_graph_call_old,
+                                             ftrace_graph_caller_old,
+                                             enable);
+#endif
+
+        return ret;
+}
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+        return ftrace_modify_graph_caller(true);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+        return ftrace_modify_graph_caller(false);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
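
prepare_ftrace_return() above performs the usual graph-tracer trick: it saves the real return address from the parent's stack slot, substitutes return_to_handler, and when the hook later runs, return_to_handler asks ftrace_return_to_handler() for the saved address so execution continues where it should have. A conceptual stand-alone model of that hijack, using a plain array in place of the kernel's per-task ret_stack and made-up addresses:

    /* Conceptual model of the return-address hijack done by
     * prepare_ftrace_return()/return_to_handler(); not the kernel's API. */
    #include <stdio.h>

    static unsigned long shadow_stack[16];
    static int shadow_depth;

    /* On function entry: save the real return address, install the hook. */
    static void hijack_return(unsigned long *parent, unsigned long hook)
    {
            shadow_stack[shadow_depth++] = *parent;
            *parent = hook;
    }

    /* When the hook runs instead of the real return: hand back the saved
     * address so control flow continues normally. */
    static unsigned long pop_real_return(void)
    {
            return shadow_stack[--shadow_depth];
    }

    int main(void)
    {
            unsigned long saved_lr = 0xc0123456UL;  /* hypothetical parent address */

            hijack_return(&saved_lr, 0xc0ffee00UL); /* 0xc0ffee00 models return_to_handler */
            printf("patched lr : 0x%08lx\n", saved_lr);
            printf("real return: 0x%08lx\n", pop_real_return());
            return 0;
    }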
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 36ad3be4692a..6d616333340f 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -35,6 +35,7 @@
 #include <linux/list.h>
 #include <linux/kallsyms.h>
 #include <linux/proc_fs.h>
+#include <linux/ftrace.h>
 
 #include <asm/system.h>
 #include <asm/mach/irq.h>
@@ -105,7 +106,8 @@ unlock:
  * come via this function.  Instead, they should provide their
  * own 'handler'
  */
-asmlinkage void __exception asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
+asmlinkage void __exception_irq_entry
+asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
 {
         struct pt_regs *old_regs = set_irq_regs(regs);
 
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 8c1959590252..bbca89872c18 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -16,6 +16,7 @@
 #include <linux/cache.h>
 #include <linux/profile.h>
 #include <linux/errno.h>
+#include <linux/ftrace.h>
 #include <linux/mm.h>
 #include <linux/err.h>
 #include <linux/cpu.h>
@@ -457,7 +458,7 @@ static void ipi_timer(void)
 }
 
 #ifdef CONFIG_LOCAL_TIMERS
-asmlinkage void __exception do_local_timer(struct pt_regs *regs)
+asmlinkage void __exception_irq_entry do_local_timer(struct pt_regs *regs)
 {
         struct pt_regs *old_regs = set_irq_regs(regs);
         int cpu = smp_processor_id();
@@ -544,7 +545,7 @@ static void ipi_cpu_stop(unsigned int cpu)
  *
  *  Bit 0 - Inter-processor function call
  */
-asmlinkage void __exception do_IPI(struct pt_regs *regs)
+asmlinkage void __exception_irq_entry do_IPI(struct pt_regs *regs)
 {
         unsigned int cpu = smp_processor_id();
         struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index cead8893b46b..897c1a8f1694 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -101,6 +101,7 @@ SECTIONS
                 __exception_text_start = .;
                 *(.exception.text)
                 __exception_text_end = .;
+                IRQENTRY_TEXT
                 TEXT_TEXT
                 SCHED_TEXT
                 LOCK_TEXT
diff --git a/arch/arm/plat-versatile/sched-clock.c b/arch/arm/plat-versatile/sched-clock.c
index 9768cf7e83d7..9696ddc238c9 100644
--- a/arch/arm/plat-versatile/sched-clock.c
+++ b/arch/arm/plat-versatile/sched-clock.c
@@ -20,6 +20,7 @@
  */
 #include <linux/cnt32_to_63.h>
 #include <linux/io.h>
+#include <linux/sched.h>
 #include <asm/div64.h>
 
 #include <mach/hardware.h>