aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2010-02-28 13:17:55 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2010-02-28 13:17:55 -0500
commite0d272429a34ff143bfa04ee8e29dd4eed2964c7 (patch)
tree5a719135b245811b5d61ed084d7b8c1bc2e87031 /arch
parentd25e8dbdab203ed8b4fd0a174bb5259e35ecd87c (diff)
parent480917427b0b6ff39de55ffc81391055472e6c26 (diff)
Merge branch 'tracing-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'tracing-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (28 commits) ftrace: Add function names to dangling } in function graph tracer tracing: Simplify memory recycle of trace_define_field tracing: Remove unnecessary variable in print_graph_return tracing: Fix typo of info text in trace_kprobe.c tracing: Fix typo in prof_sysexit_enable() tracing: Remove CONFIG_TRACE_POWER from kernel config tracing: Fix ftrace_event_call alignment for use with gcc 4.5 ftrace: Remove memory barriers from NMI code when not needed tracing/kprobes: Add short documentation for HAVE_REGS_AND_STACK_ACCESS_API s390: Add pt_regs register and stack access API tracing/kprobes: Make Kconfig dependencies generic tracing: Unify arch_syscall_addr() implementations tracing: Add notrace to TRACE_EVENT implementation functions ftrace: Allow to remove a single function from function graph filter tracing: Add correct/incorrect to sort keys for branch annotation output tracing: Simplify test for function_graph tracing start point tracing: Drop the tr check from the graph tracing path tracing: Add stack dump to trace_printk if stacktrace option is set tracing: Use appropriate perl constructs in recordmcount.pl tracing: optimize recordmcount.pl for offsets-handling ...
Diffstat (limited to 'arch')
-rw-r--r--arch/Kconfig8
-rw-r--r--arch/s390/Kconfig1
-rw-r--r--arch/s390/include/asm/ptrace.h13
-rw-r--r--arch/s390/include/asm/syscall.h7
-rw-r--r--arch/s390/kernel/ftrace.c10
-rw-r--r--arch/s390/kernel/ptrace.c58
-rw-r--r--arch/sh/include/asm/syscall.h2
-rw-r--r--arch/sh/kernel/ftrace.c9
-rw-r--r--arch/sparc/include/asm/syscall.h7
-rw-r--r--arch/sparc/kernel/ftrace.c11
-rw-r--r--arch/x86/Kconfig1
-rw-r--r--arch/x86/include/asm/syscall.h2
-rw-r--r--arch/x86/kernel/ftrace.c36
13 files changed, 124 insertions, 41 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index 06a13729c8df..215e46073c45 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -105,6 +105,14 @@ config HAVE_DMA_ATTRS
105config USE_GENERIC_SMP_HELPERS 105config USE_GENERIC_SMP_HELPERS
106 bool 106 bool
107 107
108config HAVE_REGS_AND_STACK_ACCESS_API
109 bool
110 help
111 This symbol should be selected by an architecture if it supports
112 the API needed to access registers and stack entries from pt_regs,
113 declared in asm/ptrace.h
114 For example the kprobes-based event tracer needs this API.
115
108config HAVE_CLK 116config HAVE_CLK
109 bool 117 bool
110 help 118 help
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 19deda8d8875..0d8cd9bbe101 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -90,6 +90,7 @@ config S390
90 select HAVE_SYSCALL_TRACEPOINTS 90 select HAVE_SYSCALL_TRACEPOINTS
91 select HAVE_DYNAMIC_FTRACE 91 select HAVE_DYNAMIC_FTRACE
92 select HAVE_FUNCTION_GRAPH_TRACER 92 select HAVE_FUNCTION_GRAPH_TRACER
93 select HAVE_REGS_AND_STACK_ACCESS_API
93 select HAVE_DEFAULT_NO_SPIN_MUTEXES 94 select HAVE_DEFAULT_NO_SPIN_MUTEXES
94 select HAVE_OPROFILE 95 select HAVE_OPROFILE
95 select HAVE_KPROBES 96 select HAVE_KPROBES
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 95dcf183a28d..dd2d913afcae 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -492,13 +492,24 @@ struct user_regs_struct
492struct task_struct; 492struct task_struct;
493extern void user_enable_single_step(struct task_struct *); 493extern void user_enable_single_step(struct task_struct *);
494extern void user_disable_single_step(struct task_struct *); 494extern void user_disable_single_step(struct task_struct *);
495extern void show_regs(struct pt_regs * regs);
495 496
496#define user_mode(regs) (((regs)->psw.mask & PSW_MASK_PSTATE) != 0) 497#define user_mode(regs) (((regs)->psw.mask & PSW_MASK_PSTATE) != 0)
497#define instruction_pointer(regs) ((regs)->psw.addr & PSW_ADDR_INSN) 498#define instruction_pointer(regs) ((regs)->psw.addr & PSW_ADDR_INSN)
498#define user_stack_pointer(regs)((regs)->gprs[15]) 499#define user_stack_pointer(regs)((regs)->gprs[15])
499#define regs_return_value(regs)((regs)->gprs[2]) 500#define regs_return_value(regs)((regs)->gprs[2])
500#define profile_pc(regs) instruction_pointer(regs) 501#define profile_pc(regs) instruction_pointer(regs)
501extern void show_regs(struct pt_regs * regs); 502
503int regs_query_register_offset(const char *name);
504const char *regs_query_register_name(unsigned int offset);
505unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset);
506unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n);
507
508static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
509{
510 return regs->gprs[15] & PSW_ADDR_INSN;
511}
512
502#endif /* __KERNEL__ */ 513#endif /* __KERNEL__ */
503#endif /* __ASSEMBLY__ */ 514#endif /* __ASSEMBLY__ */
504 515
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index e0a73d3eb837..8429686951f9 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -15,6 +15,13 @@
15#include <linux/sched.h> 15#include <linux/sched.h>
16#include <asm/ptrace.h> 16#include <asm/ptrace.h>
17 17
18/*
19 * The syscall table always contains 32 bit pointers since we know that the
20 * address of the function to be called is (way) below 4GB. So the "int"
21 * type here is what we want [need] for both 32 bit and 64 bit systems.
22 */
23extern const unsigned int sys_call_table[];
24
18static inline long syscall_get_nr(struct task_struct *task, 25static inline long syscall_get_nr(struct task_struct *task,
19 struct pt_regs *regs) 26 struct pt_regs *regs)
20{ 27{
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 314d8f09cf31..6a83d0581317 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -200,13 +200,3 @@ out:
200 return parent; 200 return parent;
201} 201}
202#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 202#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
203
204#ifdef CONFIG_FTRACE_SYSCALLS
205
206extern unsigned int sys_call_table[];
207
208unsigned long __init arch_syscall_addr(int nr)
209{
210 return (unsigned long)sys_call_table[nr];
211}
212#endif
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 7cf464234419..33fdc5a79764 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -992,3 +992,61 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
992#endif 992#endif
993 return &user_s390_view; 993 return &user_s390_view;
994} 994}
995
996static const char *gpr_names[NUM_GPRS] = {
997 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
998 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
999};
1000
1001unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
1002{
1003 if (offset >= NUM_GPRS)
1004 return 0;
1005 return regs->gprs[offset];
1006}
1007
1008int regs_query_register_offset(const char *name)
1009{
1010 unsigned long offset;
1011
1012 if (!name || *name != 'r')
1013 return -EINVAL;
1014 if (strict_strtoul(name + 1, 10, &offset))
1015 return -EINVAL;
1016 if (offset >= NUM_GPRS)
1017 return -EINVAL;
1018 return offset;
1019}
1020
1021const char *regs_query_register_name(unsigned int offset)
1022{
1023 if (offset >= NUM_GPRS)
1024 return NULL;
1025 return gpr_names[offset];
1026}
1027
1028static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
1029{
1030 unsigned long ksp = kernel_stack_pointer(regs);
1031
1032 return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
1033}
1034
1035/**
1036 * regs_get_kernel_stack_nth() - get Nth entry of the stack
1037 * @regs:pt_regs which contains kernel stack pointer.
1038 * @n:stack entry number.
1039 *
1040 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
1041 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
1042 * this returns 0.
1043 */
1044unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
1045{
1046 unsigned long addr;
1047
1048 addr = kernel_stack_pointer(regs) + n * sizeof(long);
1049 if (!regs_within_kernel_stack(regs, addr))
1050 return 0;
1051 return *(unsigned long *)addr;
1052}
diff --git a/arch/sh/include/asm/syscall.h b/arch/sh/include/asm/syscall.h
index 6a381429ee9d..aa7777bdc370 100644
--- a/arch/sh/include/asm/syscall.h
+++ b/arch/sh/include/asm/syscall.h
@@ -1,6 +1,8 @@
1#ifndef __ASM_SH_SYSCALL_H 1#ifndef __ASM_SH_SYSCALL_H
2#define __ASM_SH_SYSCALL_H 2#define __ASM_SH_SYSCALL_H
3 3
4extern const unsigned long sys_call_table[];
5
4#ifdef CONFIG_SUPERH32 6#ifdef CONFIG_SUPERH32
5# include "syscall_32.h" 7# include "syscall_32.h"
6#else 8#else
diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c
index a48cdedc73b5..30e13196d35b 100644
--- a/arch/sh/kernel/ftrace.c
+++ b/arch/sh/kernel/ftrace.c
@@ -399,12 +399,3 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
399 } 399 }
400} 400}
401#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 401#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
402
403#ifdef CONFIG_FTRACE_SYSCALLS
404extern unsigned long *sys_call_table;
405
406unsigned long __init arch_syscall_addr(int nr)
407{
408 return (unsigned long)sys_call_table[nr];
409}
410#endif /* CONFIG_FTRACE_SYSCALLS */
diff --git a/arch/sparc/include/asm/syscall.h b/arch/sparc/include/asm/syscall.h
index 7486c605e23c..025a02ad2e31 100644
--- a/arch/sparc/include/asm/syscall.h
+++ b/arch/sparc/include/asm/syscall.h
@@ -5,6 +5,13 @@
5#include <linux/sched.h> 5#include <linux/sched.h>
6#include <asm/ptrace.h> 6#include <asm/ptrace.h>
7 7
8/*
9 * The syscall table always contains 32 bit pointers since we know that the
10 * address of the function to be called is (way) below 4GB. So the "int"
11 * type here is what we want [need] for both 32 bit and 64 bit systems.
12 */
13extern const unsigned int sys_call_table[];
14
8/* The system call number is given by the user in %g1 */ 15/* The system call number is given by the user in %g1 */
9static inline long syscall_get_nr(struct task_struct *task, 16static inline long syscall_get_nr(struct task_struct *task,
10 struct pt_regs *regs) 17 struct pt_regs *regs)
diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c
index 29973daa9930..9103a56b39e8 100644
--- a/arch/sparc/kernel/ftrace.c
+++ b/arch/sparc/kernel/ftrace.c
@@ -91,14 +91,3 @@ int __init ftrace_dyn_arch_init(void *data)
91 return 0; 91 return 0;
92} 92}
93#endif 93#endif
94
95#ifdef CONFIG_FTRACE_SYSCALLS
96
97extern unsigned int sys_call_table[];
98
99unsigned long __init arch_syscall_addr(int nr)
100{
101 return (unsigned long)sys_call_table[nr];
102}
103
104#endif
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index eb4092568f9e..0896008f7509 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -45,6 +45,7 @@ config X86
45 select HAVE_GENERIC_DMA_COHERENT if X86_32 45 select HAVE_GENERIC_DMA_COHERENT if X86_32
46 select HAVE_EFFICIENT_UNALIGNED_ACCESS 46 select HAVE_EFFICIENT_UNALIGNED_ACCESS
47 select USER_STACKTRACE_SUPPORT 47 select USER_STACKTRACE_SUPPORT
48 select HAVE_REGS_AND_STACK_ACCESS_API
48 select HAVE_DMA_API_DEBUG 49 select HAVE_DMA_API_DEBUG
49 select HAVE_KERNEL_GZIP 50 select HAVE_KERNEL_GZIP
50 select HAVE_KERNEL_BZIP2 51 select HAVE_KERNEL_BZIP2
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
index 8d33bc5462d1..c4a348f7bd43 100644
--- a/arch/x86/include/asm/syscall.h
+++ b/arch/x86/include/asm/syscall.h
@@ -16,6 +16,8 @@
16#include <linux/sched.h> 16#include <linux/sched.h>
17#include <linux/err.h> 17#include <linux/err.h>
18 18
19extern const unsigned long sys_call_table[];
20
19/* 21/*
20 * Only the low 32 bits of orig_ax are meaningful, so we return int. 22 * Only the low 32 bits of orig_ax are meaningful, so we return int.
21 * This importantly ignores the high bits on 64-bit, so comparisons 23 * This importantly ignores the high bits on 64-bit, so comparisons
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 309689245431..cd37469b54ee 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -30,14 +30,32 @@
30 30
31#ifdef CONFIG_DYNAMIC_FTRACE 31#ifdef CONFIG_DYNAMIC_FTRACE
32 32
33/*
34 * modifying_code is set to notify NMIs that they need to use
35 * memory barriers when entering or exiting. But we don't want
36 * to burden NMIs with unnecessary memory barriers when code
37 * modification is not being done (which is most of the time).
38 *
39 * A mutex is already held when ftrace_arch_code_modify_prepare
40 * and post_process are called. No locks need to be taken here.
41 *
42 * Stop machine will make sure currently running NMIs are done
43 * and new NMIs will see the updated variable before we need
44 * to worry about NMIs doing memory barriers.
45 */
46static int modifying_code __read_mostly;
47static DEFINE_PER_CPU(int, save_modifying_code);
48
33int ftrace_arch_code_modify_prepare(void) 49int ftrace_arch_code_modify_prepare(void)
34{ 50{
35 set_kernel_text_rw(); 51 set_kernel_text_rw();
52 modifying_code = 1;
36 return 0; 53 return 0;
37} 54}
38 55
39int ftrace_arch_code_modify_post_process(void) 56int ftrace_arch_code_modify_post_process(void)
40{ 57{
58 modifying_code = 0;
41 set_kernel_text_ro(); 59 set_kernel_text_ro();
42 return 0; 60 return 0;
43} 61}
@@ -149,6 +167,11 @@ static void ftrace_mod_code(void)
149 167
150void ftrace_nmi_enter(void) 168void ftrace_nmi_enter(void)
151{ 169{
170 __get_cpu_var(save_modifying_code) = modifying_code;
171
172 if (!__get_cpu_var(save_modifying_code))
173 return;
174
152 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) { 175 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
153 smp_rmb(); 176 smp_rmb();
154 ftrace_mod_code(); 177 ftrace_mod_code();
@@ -160,6 +183,9 @@ void ftrace_nmi_enter(void)
160 183
161void ftrace_nmi_exit(void) 184void ftrace_nmi_exit(void)
162{ 185{
186 if (!__get_cpu_var(save_modifying_code))
187 return;
188
163 /* Finish all executions before clearing nmi_running */ 189 /* Finish all executions before clearing nmi_running */
164 smp_mb(); 190 smp_mb();
165 atomic_dec(&nmi_running); 191 atomic_dec(&nmi_running);
@@ -484,13 +510,3 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
484 } 510 }
485} 511}
486#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 512#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
487
488#ifdef CONFIG_FTRACE_SYSCALLS
489
490extern unsigned long *sys_call_table;
491
492unsigned long __init arch_syscall_addr(int nr)
493{
494 return (unsigned long)(&sys_call_table)[nr];
495}
496#endif