Diffstat (limited to 'include')
-rw-r--r--  include/linux/ftrace.h     | 158
-rw-r--r--  include/linux/kprobes.h    |  27
-rw-r--r--  include/linux/perf_event.h |  60
-rw-r--r--  include/linux/perf_regs.h  |  25
4 files changed, 255 insertions(+), 15 deletions(-)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 55e6d63d46d0..a52f2f4fe030 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -10,6 +10,7 @@
 #include <linux/kallsyms.h>
 #include <linux/linkage.h>
 #include <linux/bitops.h>
+#include <linux/ptrace.h>
 #include <linux/ktime.h>
 #include <linux/sched.h>
 #include <linux/types.h>
@@ -18,6 +19,28 @@
 
 #include <asm/ftrace.h>
 
+/*
+ * If the arch supports passing the variable contents of
+ * function_trace_op as the third parameter back from the
+ * mcount call, then the arch should define this as 1.
+ */
+#ifndef ARCH_SUPPORTS_FTRACE_OPS
+#define ARCH_SUPPORTS_FTRACE_OPS 0
+#endif
+
+/*
+ * If the arch's mcount caller does not support all of ftrace's
+ * features, then it must call an indirect function that
+ * does. Or at least does enough to prevent any unwelcomed side effects.
+ */
+#if !defined(CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST) || \
+	!ARCH_SUPPORTS_FTRACE_OPS
+# define FTRACE_FORCE_LIST_FUNC 1
+#else
+# define FTRACE_FORCE_LIST_FUNC 0
+#endif
+
+
 struct module;
 struct ftrace_hash;
 
@@ -29,7 +52,10 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 		     void __user *buffer, size_t *lenp,
 		     loff_t *ppos);
 
-typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);
+struct ftrace_ops;
+
+typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
+			      struct ftrace_ops *op, struct pt_regs *regs);
 
 /*
  * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
@@ -45,12 +71,33 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);
  * could be controled by following calls:
  *   ftrace_function_local_enable
  *   ftrace_function_local_disable
+ * SAVE_REGS - The ftrace_ops wants regs saved at each function called
+ *            and passed to the callback. If this flag is set, but the
+ *            architecture does not support passing regs
+ *            (ARCH_SUPPORTS_FTRACE_SAVE_REGS is not defined), then the
+ *            ftrace_ops will fail to register, unless the next flag
+ *            is set.
+ * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
+ *            handler can handle an arch that does not save regs
+ *            (the handler tests if regs == NULL), then it can set
+ *            this flag instead. It will not fail registering the ftrace_ops
+ *            but, the regs field will be NULL if the arch does not support
+ *            passing regs to the handler.
+ *            Note, if this flag is set, the SAVE_REGS flag will automatically
+ *            get set upon registering the ftrace_ops, if the arch supports it.
+ * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure
+ *            that the call back has its own recursion protection. If it does
+ *            not set this, then the ftrace infrastructure will add recursion
+ *            protection for the caller.
  */
 enum {
 	FTRACE_OPS_FL_ENABLED			= 1 << 0,
 	FTRACE_OPS_FL_GLOBAL			= 1 << 1,
 	FTRACE_OPS_FL_DYNAMIC			= 1 << 2,
 	FTRACE_OPS_FL_CONTROL			= 1 << 3,
+	FTRACE_OPS_FL_SAVE_REGS			= 1 << 4,
+	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= 1 << 5,
+	FTRACE_OPS_FL_RECURSION_SAFE		= 1 << 6,
 };
 
 struct ftrace_ops {
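
A minimal sketch of a callback built against the new prototype and flags above; my_callback/my_ops are hypothetical names, and the regs == NULL test is the pattern the SAVE_REGS_IF_SUPPORTED comment describes (printing from a trace callback is for illustration only):

    #include <linux/ftrace.h>
    #include <linux/printk.h>

    /* Hypothetical example, not part of this patch. */
    static void notrace my_callback(unsigned long ip, unsigned long parent_ip,
                                    struct ftrace_ops *op, struct pt_regs *regs)
    {
            /* regs is NULL when the arch cannot save regs (see flag docs above) */
            if (regs)
                    pr_info("traced %pS (pc=%lx)\n", (void *)ip,
                            instruction_pointer(regs));
    }

    static struct ftrace_ops my_ops = {
            .func   = my_callback,
            .flags  = FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
    };
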
@@ -163,7 +210,8 @@ static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
 	return *this_cpu_ptr(ops->disabled);
 }
 
-extern void ftrace_stub(unsigned long a0, unsigned long a1);
+extern void ftrace_stub(unsigned long a0, unsigned long a1,
+			struct ftrace_ops *op, struct pt_regs *regs);
 
 #else /* !CONFIG_FUNCTION_TRACER */
 /*
@@ -172,6 +220,10 @@ extern void ftrace_stub(unsigned long a0, unsigned long a1);
  */
 #define register_ftrace_function(ops) ({ 0; })
 #define unregister_ftrace_function(ops) ({ 0; })
+static inline int ftrace_nr_registered_ops(void)
+{
+	return 0;
+}
 static inline void clear_ftrace_function(void) { }
 static inline void ftrace_kill(void) { }
 static inline void ftrace_stop(void) { }
@@ -227,12 +279,33 @@ extern void unregister_ftrace_function_probe_all(char *glob);
 
 extern int ftrace_text_reserved(void *start, void *end);
 
+extern int ftrace_nr_registered_ops(void);
+
+/*
+ * The dyn_ftrace record's flags field is split into two parts.
+ * the first part which is '0-FTRACE_REF_MAX' is a counter of
+ * the number of callbacks that have registered the function that
+ * the dyn_ftrace descriptor represents.
+ *
+ * The second part is a mask:
+ *  ENABLED - the function is being traced
+ *  REGS    - the record wants the function to save regs
+ *  REGS_EN - the function is set up to save regs.
+ *
+ * When a new ftrace_ops is registered and wants a function to save
+ * pt_regs, the rec->flag REGS is set. When the function has been
+ * set up to save regs, the REG_EN flag is set. Once a function
+ * starts saving regs it will do so until all ftrace_ops are removed
+ * from tracing that function.
+ */
 enum {
-	FTRACE_FL_ENABLED	= (1 << 30),
+	FTRACE_FL_ENABLED	= (1UL << 29),
+	FTRACE_FL_REGS		= (1UL << 30),
+	FTRACE_FL_REGS_EN	= (1UL << 31)
 };
 
-#define FTRACE_FL_MASK		(0x3UL << 30)
-#define FTRACE_REF_MAX		((1 << 30) - 1)
+#define FTRACE_FL_MASK		(0x7UL << 29)
+#define FTRACE_REF_MAX		((1UL << 29) - 1)
 
 struct dyn_ftrace {
 	union {
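
The split described in the comment above can be illustrated with two hypothetical helpers (not part of this patch); the counter occupies the low 29 bits and the mask the top three:

    #include <linux/types.h>

    /* Illustrative only: reading the two halves of rec->flags. */
    static inline unsigned long rec_ref_count(struct dyn_ftrace *rec)
    {
            return rec->flags & FTRACE_REF_MAX;     /* registered callbacks */
    }

    static inline bool rec_saves_regs(struct dyn_ftrace *rec)
    {
            return rec->flags & FTRACE_FL_REGS_EN;  /* site set up to save regs */
    }
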
@@ -244,6 +317,8 @@ struct dyn_ftrace {
 };
 
 int ftrace_force_update(void);
+int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
+			 int remove, int reset);
 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
 		       int len, int reset);
 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
@@ -263,9 +338,23 @@ enum {
 	FTRACE_STOP_FUNC_RET		= (1 << 4),
 };
 
+/*
+ * The FTRACE_UPDATE_* enum is used to pass information back
+ * from the ftrace_update_record() and ftrace_test_record()
+ * functions. These are called by the code update routines
+ * to find out what is to be done for a given function.
+ *
+ *  IGNORE           - The function is already what we want it to be
+ *  MAKE_CALL        - Start tracing the function
+ *  MODIFY_CALL      - Stop saving regs for the function
+ *  MODIFY_CALL_REGS - Start saving regs for the function
+ *  MAKE_NOP         - Stop tracing the function
+ */
 enum {
 	FTRACE_UPDATE_IGNORE,
 	FTRACE_UPDATE_MAKE_CALL,
+	FTRACE_UPDATE_MODIFY_CALL,
+	FTRACE_UPDATE_MODIFY_CALL_REGS,
 	FTRACE_UPDATE_MAKE_NOP,
 };
 
@@ -317,7 +406,9 @@ extern int ftrace_dyn_arch_init(void *data);
 extern void ftrace_replace_code(int enable);
 extern int ftrace_update_ftrace_func(ftrace_func_t func);
 extern void ftrace_caller(void);
+extern void ftrace_regs_caller(void);
 extern void ftrace_call(void);
+extern void ftrace_regs_call(void);
 extern void mcount_call(void);
 
 void ftrace_modify_all_code(int command);
@@ -325,6 +416,15 @@ void ftrace_modify_all_code(int command);
 #ifndef FTRACE_ADDR
 #define FTRACE_ADDR ((unsigned long)ftrace_caller)
 #endif
+
+#ifndef FTRACE_REGS_ADDR
+#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS
+# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
+#else
+# define FTRACE_REGS_ADDR FTRACE_ADDR
+#endif
+#endif
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 extern void ftrace_graph_caller(void);
 extern int ftrace_enable_ftrace_graph_caller(void);
@@ -380,6 +480,39 @@ extern int ftrace_make_nop(struct module *mod,
  */
 extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
 
+#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS
+/**
+ * ftrace_modify_call - convert from one addr to another (no nop)
+ * @rec: the mcount call site record
+ * @old_addr: the address expected to be currently called to
+ * @addr: the address to change to
+ *
+ * This is a very sensitive operation and great care needs
+ * to be taken by the arch. The operation should carefully
+ * read the location, check to see if what is read is indeed
+ * what we expect it to be, and then on success of the compare,
+ * it should write to the location.
+ *
+ * The code segment at @rec->ip should be a caller to @old_addr
+ *
+ * Return must be:
+ *   0 on success
+ *   -EFAULT on error reading the location
+ *   -EINVAL on a failed compare of the contents
+ *   -EPERM  on error writing to the location
+ * Any other value will be considered a failure.
+ */
+extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+			      unsigned long addr);
+#else
+/* Should never be called */
+static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+				     unsigned long addr)
+{
+	return -EINVAL;
+}
+#endif
+
 /* May be defined in arch */
 extern int ftrace_arch_read_dyn_info(char *buf, int size);
 
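
The read/compare/write contract documented above might look like this in an arch (a sketch only: make_call_insn() is an invented helper, and MCOUNT_INSN_SIZE is the arch's call-site length):

    #include <linux/string.h>
    #include <linux/uaccess.h>      /* probe_kernel_read/write */

    /* Sketch of the contract above; make_call_insn() is hypothetical. */
    static int example_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                                   unsigned long addr)
    {
            unsigned char old[MCOUNT_INSN_SIZE], cur[MCOUNT_INSN_SIZE];
            unsigned char new[MCOUNT_INSN_SIZE];

            make_call_insn(old, rec->ip, old_addr); /* expected bytes */
            make_call_insn(new, rec->ip, addr);     /* replacement bytes */

            if (probe_kernel_read(cur, (void *)rec->ip, MCOUNT_INSN_SIZE))
                    return -EFAULT;                 /* could not read location */
            if (memcmp(cur, old, MCOUNT_INSN_SIZE))
                    return -EINVAL;                 /* contents were unexpected */
            if (probe_kernel_write((void *)rec->ip, new, MCOUNT_INSN_SIZE))
                    return -EPERM;                  /* could not write location */
            return 0;
    }
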
@@ -387,7 +520,7 @@ extern int skip_trace(unsigned long ip);
 
 extern void ftrace_disable_daemon(void);
 extern void ftrace_enable_daemon(void);
-#else
+#else /* CONFIG_DYNAMIC_FTRACE */
 static inline int skip_trace(unsigned long ip) { return 0; }
 static inline int ftrace_force_update(void) { return 0; }
 static inline void ftrace_disable_daemon(void) { }
@@ -405,6 +538,10 @@ static inline int ftrace_text_reserved(void *start, void *end)
 {
 	return 0;
 }
+static inline unsigned long ftrace_location(unsigned long ip)
+{
+	return 0;
+}
 
 /*
  * Again users of functions that have ftrace_ops may not
@@ -413,6 +550,7 @@ static inline int ftrace_text_reserved(void *start, void *end)
  */
 #define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
 #define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
+#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
 #define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
 #define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
 #define ftrace_free_filter(ops) do { } while (0)
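
Taken together, the pieces above are enough to attach a regs-saving callback to a single call site via the new ftrace_set_filter_ip(); a minimal sketch, with my_func_probe/my_probe_ops and the traced symbol purely illustrative:

    #include <linux/errno.h>
    #include <linux/ftrace.h>
    #include <linux/kallsyms.h>

    static void notrace my_func_probe(unsigned long ip, unsigned long parent_ip,
                                      struct ftrace_ops *op, struct pt_regs *regs)
    {
            /* full pt_regs available here on SAVE_REGS-capable arches */
    }

    static struct ftrace_ops my_probe_ops = {
            .func   = my_func_probe,
            .flags  = FTRACE_OPS_FL_SAVE_REGS,
    };

    static int install_probe(void)
    {
            unsigned long ip = kallsyms_lookup_name("schedule"); /* example */
            int ret;

            if (!ip)
                    return -ENOENT;
            /* remove=0: add ip to the filter; reset=0: keep existing entries */
            ret = ftrace_set_filter_ip(&my_probe_ops, ip, 0, 0);
            if (ret)
                    return ret;
            return register_ftrace_function(&my_probe_ops);
    }
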
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index b6e1f8c00577..23755ba42abc 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -38,6 +38,7 @@
 #include <linux/spinlock.h>
 #include <linux/rcupdate.h>
 #include <linux/mutex.h>
+#include <linux/ftrace.h>
 
 #ifdef CONFIG_KPROBES
 #include <asm/kprobes.h>
@@ -48,14 +49,26 @@
 #define KPROBE_REENTER		0x00000004
 #define KPROBE_HIT_SSDONE	0x00000008
 
+/*
+ * If function tracer is enabled and the arch supports full
+ * passing of pt_regs to function tracing, then kprobes can
+ * optimize on top of function tracing.
+ */
+#if defined(CONFIG_FUNCTION_TRACER) && defined(ARCH_SUPPORTS_FTRACE_SAVE_REGS) \
+	&& defined(ARCH_SUPPORTS_KPROBES_ON_FTRACE)
+# define KPROBES_CAN_USE_FTRACE
+#endif
+
 /* Attach to insert probes on any functions which should be ignored*/
 #define __kprobes	__attribute__((__section__(".kprobes.text")))
+
 #else /* CONFIG_KPROBES */
 typedef int kprobe_opcode_t;
 struct arch_specific_insn {
 	int dummy;
 };
 #define __kprobes
+
 #endif /* CONFIG_KPROBES */
 
 struct kprobe;
@@ -128,6 +141,7 @@ struct kprobe {
  * NOTE:
  * this flag is only for optimized_kprobe.
  */
+#define KPROBE_FLAG_FTRACE	8 /* probe is using ftrace */
 
 /* Has this kprobe gone ? */
 static inline int kprobe_gone(struct kprobe *p)
@@ -146,6 +160,13 @@ static inline int kprobe_optimized(struct kprobe *p)
 {
 	return p->flags & KPROBE_FLAG_OPTIMIZED;
 }
+
+/* Is this kprobe uses ftrace ? */
+static inline int kprobe_ftrace(struct kprobe *p)
+{
+	return p->flags & KPROBE_FLAG_FTRACE;
+}
+
 /*
  * Special probe type that uses setjmp-longjmp type tricks to resume
  * execution at a specified entry with a matching prototype corresponding
@@ -295,6 +316,12 @@ extern int proc_kprobes_optimization_handler(struct ctl_table *table,
 #endif
 
 #endif /* CONFIG_OPTPROBES */
+#ifdef KPROBES_CAN_USE_FTRACE
+extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+				  struct ftrace_ops *ops, struct pt_regs *regs);
+extern int arch_prepare_kprobe_ftrace(struct kprobe *p);
+#endif
+
 
 /* Get the kprobe at this addr (if any) - called with preemption disabled */
 struct kprobe *get_kprobe(void *addr);
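
Probe authors do not call these hooks directly; an ordinary kprobe such as the hypothetical sketch below is switched to the ftrace path by the core (and marked with KPROBE_FLAG_FTRACE) when KPROBES_CAN_USE_FTRACE is defined and the probe lands on the function's mcount site:

    #include <linux/kprobes.h>
    #include <linux/printk.h>

    /* Hypothetical example of an ordinary entry probe. */
    static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
    {
            pr_info("%s hit via %s\n", p->symbol_name,
                    kprobe_ftrace(p) ? "ftrace" : "breakpoint");
            return 0;
    }

    static struct kprobe my_probe = {
            .symbol_name    = "do_fork",            /* example target */
            .pre_handler    = my_pre_handler,
    };

    /* ... register_kprobe(&my_probe) from module init ... */
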
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 7602ccb3f40e..28f9cee3fbc3 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -130,8 +130,10 @@ enum perf_event_sample_format {
 	PERF_SAMPLE_STREAM_ID			= 1U << 9,
 	PERF_SAMPLE_RAW				= 1U << 10,
 	PERF_SAMPLE_BRANCH_STACK		= 1U << 11,
+	PERF_SAMPLE_REGS_USER			= 1U << 12,
+	PERF_SAMPLE_STACK_USER			= 1U << 13,
 
-	PERF_SAMPLE_MAX = 1U << 12,		/* non-ABI */
+	PERF_SAMPLE_MAX = 1U << 14,		/* non-ABI */
 };
 
 /*
@@ -163,6 +165,15 @@ enum perf_branch_sample_type {
 	 PERF_SAMPLE_BRANCH_HV)
 
 /*
+ * Values to determine ABI of the registers dump.
+ */
+enum perf_sample_regs_abi {
+	PERF_SAMPLE_REGS_ABI_NONE	= 0,
+	PERF_SAMPLE_REGS_ABI_32		= 1,
+	PERF_SAMPLE_REGS_ABI_64		= 2,
+};
+
+/*
  * The format of the data returned by read() on a perf event fd,
  * as specified by attr.read_format:
  *
@@ -194,6 +205,8 @@ enum perf_event_read_format {
 #define PERF_ATTR_SIZE_VER0	64	/* sizeof first published struct */
 #define PERF_ATTR_SIZE_VER1	72	/* add: config2 */
 #define PERF_ATTR_SIZE_VER2	80	/* add: branch_sample_type */
+#define PERF_ATTR_SIZE_VER3	96	/* add: sample_regs_user */
+					/* add: sample_stack_user */
 
 /*
  * Hardware event_id to monitor via a performance monitoring event:
@@ -255,7 +268,10 @@ struct perf_event_attr {
 				exclude_host   :  1, /* don't count in host   */
 				exclude_guest  :  1, /* don't count in guest  */
 
-				__reserved_1   : 43;
+				exclude_callchain_kernel : 1, /* exclude kernel callchains */
+				exclude_callchain_user   : 1, /* exclude user callchains */
+
+				__reserved_1   : 41;
 
 	union {
 		__u32		wakeup_events;	  /* wakeup every n events */
@@ -271,7 +287,21 @@ struct perf_event_attr {
 		__u64		bp_len;
 		__u64		config2; /* extension of config1 */
 	};
-	__u64	branch_sample_type; /* enum branch_sample_type */
+	__u64	branch_sample_type; /* enum perf_branch_sample_type */
+
+	/*
+	 * Defines set of user regs to dump on samples.
+	 * See asm/perf_regs.h for details.
+	 */
+	__u64	sample_regs_user;
+
+	/*
+	 * Defines size of the user stack to dump on samples.
+	 */
+	__u32	sample_stack_user;
+
+	/* Align to u64. */
+	__u32	__reserved_2;
 };
 
 /*
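
From userspace the new fields are filled in before perf_event_open(); a sketch, with the register mask and sizes purely illustrative (the valid mask bits are arch-specific, see asm/perf_regs.h):

    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Userspace sketch: request a user-regs and user-stack dump per sample. */
    static int open_sampling_event(pid_t pid)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size               = sizeof(attr); /* >= PERF_ATTR_SIZE_VER3 */
            attr.type               = PERF_TYPE_HARDWARE;
            attr.config             = PERF_COUNT_HW_CPU_CYCLES;
            attr.sample_period      = 100000;
            attr.sample_type        = PERF_SAMPLE_IP | PERF_SAMPLE_REGS_USER |
                                      PERF_SAMPLE_STACK_USER;
            attr.sample_regs_user   = 0xff;         /* illustrative mask only */
            attr.sample_stack_user  = 8192;         /* bytes of user stack */
            attr.exclude_callchain_kernel = 1;      /* new bit from this patch */

            return syscall(__NR_perf_event_open, &attr, pid, -1 /* any cpu */,
                           -1 /* no group */, 0);
    }
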
@@ -548,6 +578,13 @@ enum perf_event_type {
 	 *	  char                  data[size];}&& PERF_SAMPLE_RAW
 	 *
 	 *	{ u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK
+	 *
+	 *	{ u64			abi; # enum perf_sample_regs_abi
+	 *	  u64			regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
+	 *
+	 *	{ u64			size;
+	 *	  char			data[size];
+	 *	  u64			dyn_size; } && PERF_SAMPLE_STACK_USER
 	 * };
 	 */
 	PERF_RECORD_SAMPLE		= 9,
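
Decoding the two new trailers on the read side might look like the following userspace sketch (assumes both REGS_USER and STACK_USER were requested, `mask` is the attr's sample_regs_user, and `p` points just past the preceding sample fields):

    #include <stdint.h>

    /* Userspace sketch: parse the REGS_USER and STACK_USER sample trailers. */
    static const uint64_t *parse_user_regs_and_stack(const uint64_t *p,
                                                     uint64_t mask)
    {
            uint64_t abi = *p++;            /* enum perf_sample_regs_abi */

            if (abi != 0 /* PERF_SAMPLE_REGS_ABI_NONE */)
                    p += __builtin_popcountll(mask); /* one u64 per set bit */

            uint64_t size = *p++;           /* as requested in the attr */
            if (size) {
                    const char *stack = (const char *)p; /* user stack copy */
                    p = (const uint64_t *)(stack + size);
                    uint64_t dyn_size = *p++; /* bytes actually captured */
                    (void)dyn_size;
            }
            return p;
    }
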
@@ -609,6 +646,7 @@ struct perf_guest_info_callbacks {
 #include <linux/static_key.h>
 #include <linux/atomic.h>
 #include <linux/sysfs.h>
+#include <linux/perf_regs.h>
 #include <asm/local.h>
 
 struct perf_callchain_entry {
@@ -654,6 +692,11 @@ struct perf_branch_stack {
 	struct perf_branch_entry	entries[0];
 };
 
+struct perf_regs_user {
+	__u64		abi;
+	struct pt_regs	*regs;
+};
+
 struct task_struct;
 
 /*
@@ -1133,6 +1176,8 @@ struct perf_sample_data {
 	struct perf_callchain_entry	*callchain;
 	struct perf_raw_record		*raw;
 	struct perf_branch_stack	*br_stack;
+	struct perf_regs_user		regs_user;
+	u64				stack_user_size;
 };
 
 static inline void perf_sample_data_init(struct perf_sample_data *data,
@@ -1142,7 +1187,10 @@ static inline void perf_sample_data_init(struct perf_sample_data *data,
 	data->addr = addr;
 	data->raw  = NULL;
 	data->br_stack = NULL;
 	data->period = period;
+	data->regs_user.abi = PERF_SAMPLE_REGS_ABI_NONE;
+	data->regs_user.regs = NULL;
+	data->stack_user_size = 0;
 }
 
 extern void perf_output_sample(struct perf_output_handle *handle,
@@ -1290,8 +1338,10 @@ static inline bool has_branch_stack(struct perf_event *event)
 extern int perf_output_begin(struct perf_output_handle *handle,
 			     struct perf_event *event, unsigned int size);
 extern void perf_output_end(struct perf_output_handle *handle);
-extern void perf_output_copy(struct perf_output_handle *handle,
-			     const void *buf, unsigned int len);
+extern unsigned int perf_output_copy(struct perf_output_handle *handle,
+			     const void *buf, unsigned int len);
+extern unsigned int perf_output_skip(struct perf_output_handle *handle,
+				     unsigned int len);
 extern int perf_swevent_get_recursion_context(void);
 extern void perf_swevent_put_recursion_context(int rctx);
 extern void perf_event_enable(struct perf_event *event);
diff --git a/include/linux/perf_regs.h b/include/linux/perf_regs.h
new file mode 100644
index 000000000000..3c73d5fe18be
--- /dev/null
+++ b/include/linux/perf_regs.h
@@ -0,0 +1,25 @@
+#ifndef _LINUX_PERF_REGS_H
+#define _LINUX_PERF_REGS_H
+
+#ifdef CONFIG_HAVE_PERF_REGS
+#include <asm/perf_regs.h>
+u64 perf_reg_value(struct pt_regs *regs, int idx);
+int perf_reg_validate(u64 mask);
+u64 perf_reg_abi(struct task_struct *task);
+#else
+static inline u64 perf_reg_value(struct pt_regs *regs, int idx)
+{
+	return 0;
+}
+
+static inline int perf_reg_validate(u64 mask)
+{
+	return mask ? -ENOSYS : 0;
+}
+
+static inline u64 perf_reg_abi(struct task_struct *task)
+{
+	return PERF_SAMPLE_REGS_ABI_NONE;
+}
+#endif /* CONFIG_HAVE_PERF_REGS */
+#endif /* _LINUX_PERF_REGS_H */
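
A sampler built on this interface emits one u64 per set mask bit, lowest bit first, matching the PERF_SAMPLE_REGS_USER record layout above; a minimal kernel-side sketch, where emit_u64() stands in for the real ring-buffer output helper:

    #include <linux/bitops.h>
    #include <linux/perf_regs.h>

    /* Sketch: dump the registers selected by `mask`. emit_u64() is a
     * hypothetical stand-in for the actual output routine. */
    static void sample_dump_regs(struct pt_regs *regs, u64 mask)
    {
            int bit;

            for_each_set_bit(bit, (const unsigned long *)&mask,
                             sizeof(mask) * BITS_PER_BYTE)
                    emit_u64(perf_reg_value(regs, bit));
    }
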