Diffstat (limited to 'include')
 include/linux/ftrace.h        |  77
 include/linux/ftrace_event.h  |   9
 include/linux/interrupt.h     |   7
 include/linux/jump_label.h    | 162
 include/linux/netdevice.h     |   4
 include/linux/netfilter.h     |   6
 include/linux/perf_event.h    | 108
 include/linux/static_key.h    |   1
 include/linux/tracepoint.h    |  28
 include/net/sock.h            |   6
 include/trace/events/power.h  |   2
 include/trace/events/printk.h |  41
 include/trace/events/sched.h  |  27
 include/trace/events/signal.h |  85
 14 files changed, 425 insertions(+), 138 deletions(-)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 028e26f0bf08..72a6cabb4d5b 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -31,16 +31,33 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 
 typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);
 
+/*
+ * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
+ * set in the flags member.
+ *
+ * ENABLED - set/unset when ftrace_ops is registered/unregistered
+ * GLOBAL  - set manually by ftrace_ops user to denote the ftrace_ops
+ *           is part of the global tracers sharing the same filter
+ *           via set_ftrace_* debugfs files.
+ * DYNAMIC - set when ftrace_ops is registered to denote dynamically
+ *           allocated ftrace_ops which need special care
+ * CONTROL - set manually by ftrace_ops user to denote the ftrace_ops
+ *           could be controlled by the following calls:
+ *             ftrace_function_local_enable
+ *             ftrace_function_local_disable
+ */
 enum {
 	FTRACE_OPS_FL_ENABLED	= 1 << 0,
 	FTRACE_OPS_FL_GLOBAL	= 1 << 1,
 	FTRACE_OPS_FL_DYNAMIC	= 1 << 2,
+	FTRACE_OPS_FL_CONTROL	= 1 << 3,
 };
 
 struct ftrace_ops {
 	ftrace_func_t		func;
 	struct ftrace_ops	*next;
 	unsigned long		flags;
+	int __percpu		*disabled;
 #ifdef CONFIG_DYNAMIC_FTRACE
 	struct ftrace_hash	*notrace_hash;
 	struct ftrace_hash	*filter_hash;
@@ -97,6 +114,55 @@ int register_ftrace_function(struct ftrace_ops *ops);
 int unregister_ftrace_function(struct ftrace_ops *ops);
 void clear_ftrace_function(void);
 
+/**
+ * ftrace_function_local_enable - enable controlled ftrace_ops on current cpu
+ *
+ * This function enables tracing on the current cpu by decreasing
+ * the per-cpu control variable.
+ * It must be called with preemption disabled and only on ftrace_ops
+ * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
+ * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
+ */
+static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
+{
+	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
+		return;
+
+	(*this_cpu_ptr(ops->disabled))--;
+}
+
+/**
+ * ftrace_function_local_disable - disable controlled ftrace_ops on current cpu
+ *
+ * This function disables tracing on the current cpu by increasing
+ * the per-cpu control variable.
+ * It must be called with preemption disabled and only on ftrace_ops
+ * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
+ * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
+ */
+static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
+{
+	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
+		return;
+
+	(*this_cpu_ptr(ops->disabled))++;
+}
+
+/**
+ * ftrace_function_local_disabled - returns ftrace_ops disabled value
+ *                                  on current cpu
+ *
+ * This function returns the value of ftrace_ops::disabled on the current cpu.
+ * It must be called with preemption disabled and only on ftrace_ops
+ * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
+ * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
+ */
+static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
+{
+	WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL));
+	return *this_cpu_ptr(ops->disabled);
+}
+
 extern void ftrace_stub(unsigned long a0, unsigned long a1);
 
 #else /* !CONFIG_FUNCTION_TRACER */
@@ -178,12 +244,13 @@ struct dyn_ftrace {
 };
 
 int ftrace_force_update(void);
-void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
-		       int len, int reset);
-void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
-			int len, int reset);
+int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
+		      int len, int reset);
+int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
+		       int len, int reset);
 void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
+void ftrace_free_filter(struct ftrace_ops *ops);
 
 int register_ftrace_command(struct ftrace_func_command *cmd);
 int unregister_ftrace_command(struct ftrace_func_command *cmd);
@@ -314,9 +381,6 @@ extern void ftrace_enable_daemon(void);
 #else
 static inline int skip_trace(unsigned long ip) { return 0; }
 static inline int ftrace_force_update(void) { return 0; }
-static inline void ftrace_set_filter(unsigned char *buf, int len, int reset)
-{
-}
 static inline void ftrace_disable_daemon(void) { }
 static inline void ftrace_enable_daemon(void) { }
 static inline void ftrace_release_mod(struct module *mod) {}
@@ -340,6 +404,9 @@ static inline int ftrace_text_reserved(void *start, void *end)
  */
 #define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
 #define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
+#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
+#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
+#define ftrace_free_filter(ops) do { } while (0)
 
 static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
 			    size_t cnt, loff_t *ppos) { return -ENODEV; }
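
The new CONTROL flag pairs with the per-cpu ops->disabled counter above. A minimal sketch of the intended call pattern (hypothetical names; assumes the ops has already been put on the control list with register_ftrace_function(), which sets up the per-cpu counter for CONTROL ops):

#include <linux/ftrace.h>
#include <linux/preempt.h>
#include <linux/types.h>

/* invoked for every traced function while enabled on this cpu */
static void sample_trace_func(unsigned long ip, unsigned long parent_ip)
{
}

static struct ftrace_ops sample_ops = {
	.func	= sample_trace_func,
	.flags	= FTRACE_OPS_FL_CONTROL,
};

/* flip tracing for the local cpu only, without touching registration */
static void sample_toggle_local(bool on)
{
	preempt_disable();	/* this_cpu_ptr() requirement noted above */
	if (on)
		ftrace_function_local_enable(&sample_ops);
	else
		ftrace_function_local_disable(&sample_ops);
	preempt_enable();
}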
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index c3da42dd22ba..dd478fc8f9f5 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -146,6 +146,10 @@ enum trace_reg {
 	TRACE_REG_UNREGISTER,
 	TRACE_REG_PERF_REGISTER,
 	TRACE_REG_PERF_UNREGISTER,
+	TRACE_REG_PERF_OPEN,
+	TRACE_REG_PERF_CLOSE,
+	TRACE_REG_PERF_ADD,
+	TRACE_REG_PERF_DEL,
 };
 
 struct ftrace_event_call;
@@ -157,7 +161,7 @@ struct ftrace_event_class {
 	void			*perf_probe;
 #endif
 	int			(*reg)(struct ftrace_event_call *event,
-				       enum trace_reg type);
+				       enum trace_reg type, void *data);
 	int			(*define_fields)(struct ftrace_event_call *);
 	struct list_head	*(*get_fields)(struct ftrace_event_call *);
 	struct list_head	fields;
@@ -165,7 +169,7 @@ struct ftrace_event_class {
 };
 
 extern int ftrace_event_reg(struct ftrace_event_call *event,
-			    enum trace_reg type);
+			    enum trace_reg type, void *data);
 
 enum {
 	TRACE_EVENT_FL_ENABLED_BIT,
171 TRACE_EVENT_FL_ENABLED_BIT, 175 TRACE_EVENT_FL_ENABLED_BIT,
@@ -241,6 +245,7 @@ enum {
 	FILTER_STATIC_STRING,
 	FILTER_DYN_STRING,
 	FILTER_PTR_STRING,
+	FILTER_TRACE_FN,
 };
 
 #define EVENT_STORAGE_SIZE 128
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index a64b00e286f5..3f830e005118 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -20,7 +20,6 @@
 #include <linux/atomic.h>
 #include <asm/ptrace.h>
 #include <asm/system.h>
-#include <trace/events/irq.h>
 
 /*
  * These correspond to the IORESOURCE_IRQ_* defines in
@@ -456,11 +455,7 @@ asmlinkage void do_softirq(void);
 asmlinkage void __do_softirq(void);
 extern void open_softirq(int nr, void (*action)(struct softirq_action *));
 extern void softirq_init(void);
-static inline void __raise_softirq_irqoff(unsigned int nr)
-{
-	trace_softirq_raise(nr);
-	or_softirq_pending(1UL << nr);
-}
+extern void __raise_softirq_irqoff(unsigned int nr);
 
 extern void raise_softirq_irqoff(unsigned int nr);
 extern void raise_softirq(unsigned int nr);
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 5ce8b140428f..c513a40510f5 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -1,22 +1,69 @@
 #ifndef _LINUX_JUMP_LABEL_H
 #define _LINUX_JUMP_LABEL_H
 
+/*
+ * Jump label support
+ *
+ * Copyright (C) 2009-2012 Jason Baron <jbaron@redhat.com>
+ * Copyright (C) 2011-2012 Peter Zijlstra <pzijlstr@redhat.com>
+ *
+ * Jump labels provide an interface to generate dynamic branches using
+ * self-modifying code. Assuming toolchain and architecture support, the
+ * result of a "if (static_key_false(&key))" statement is an unconditional
+ * branch (which defaults to false - and the true block is placed out of
+ * line).
+ *
+ * However at runtime we can change the branch target using
+ * static_key_slow_{inc,dec}(). These function as a 'reference' count on
+ * the key object and for as long as there are references all branches
+ * referring to that particular key will point to the (out of line) true
+ * block.
+ *
+ * Since this relies on modifying code, the static_key_slow_{inc,dec}()
+ * functions must be considered absolute slow paths (machine wide
+ * synchronization etc.). OTOH, since the affected branches are
+ * unconditional, their runtime overhead will be absolutely minimal,
+ * esp. in the default (off) case where the total effect is a single NOP
+ * of appropriate size. The on case will patch in a jump to the
+ * out-of-line block.
+ *
+ * When the control is directly exposed to userspace, it is prudent to
+ * delay the decrement to avoid high frequency code modifications which
+ * can (and do) cause significant performance degradation. Struct
+ * static_key_deferred and static_key_slow_dec_deferred() provide for
+ * this.
+ *
+ * Lacking toolchain and/or architecture support, it falls back to a
+ * simple conditional branch.
+ *
+ * struct static_key my_key = STATIC_KEY_INIT_TRUE;
+ *
+ *   if (static_key_true(&my_key)) {
+ *   }
+ *
+ * will result in the true case being in-line and starts the key with a
+ * single reference. Mixing static_key_true() and static_key_false() on
+ * the same key is not allowed.
+ *
+ * Not initializing the key (static data is initialized to 0s anyway) is
+ * the same as using STATIC_KEY_INIT_FALSE and static_key_false() is
+ * equivalent to static_branch().
+ */
+
 #include <linux/types.h>
 #include <linux/compiler.h>
 #include <linux/workqueue.h>
 
 #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
 
-struct jump_label_key {
+struct static_key {
 	atomic_t enabled;
+/* Set lsb bit to 1 if branch is default true, 0 otherwise */
 	struct jump_entry *entries;
 #ifdef CONFIG_MODULES
-	struct jump_label_mod *next;
+	struct static_key_mod *next;
 #endif
 };
 
-struct jump_label_key_deferred {
-	struct jump_label_key key;
+struct static_key_deferred {
+	struct static_key key;
 	unsigned long timeout;
 	struct delayed_work work;
 };
@@ -34,13 +81,34 @@ struct module;
 
 #ifdef HAVE_JUMP_LABEL
 
-#ifdef CONFIG_MODULES
-#define JUMP_LABEL_INIT {ATOMIC_INIT(0), NULL, NULL}
-#else
-#define JUMP_LABEL_INIT {ATOMIC_INIT(0), NULL}
-#endif
+#define JUMP_LABEL_TRUE_BRANCH 1UL
+
+static
+inline struct jump_entry *jump_label_get_entries(struct static_key *key)
+{
+	return (struct jump_entry *)((unsigned long)key->entries
+						& ~JUMP_LABEL_TRUE_BRANCH);
+}
 
-static __always_inline bool static_branch(struct jump_label_key *key)
+static inline bool jump_label_get_branch_default(struct static_key *key)
+{
+	if ((unsigned long)key->entries & JUMP_LABEL_TRUE_BRANCH)
+		return true;
+	return false;
+}
+
+static __always_inline bool static_key_false(struct static_key *key)
+{
+	return arch_static_branch(key);
+}
+
+static __always_inline bool static_key_true(struct static_key *key)
+{
+	return !static_key_false(key);
+}
+
+/* Deprecated. Please use static_key_false() instead. */
+static __always_inline bool static_branch(struct static_key *key)
 {
 	return arch_static_branch(key);
 }
@@ -56,21 +124,23 @@ extern void arch_jump_label_transform(struct jump_entry *entry,
 extern void arch_jump_label_transform_static(struct jump_entry *entry,
 					     enum jump_label_type type);
 extern int jump_label_text_reserved(void *start, void *end);
-extern void jump_label_inc(struct jump_label_key *key);
-extern void jump_label_dec(struct jump_label_key *key);
-extern void jump_label_dec_deferred(struct jump_label_key_deferred *key);
-extern bool jump_label_enabled(struct jump_label_key *key);
+extern void static_key_slow_inc(struct static_key *key);
+extern void static_key_slow_dec(struct static_key *key);
+extern void static_key_slow_dec_deferred(struct static_key_deferred *key);
 extern void jump_label_apply_nops(struct module *mod);
-extern void jump_label_rate_limit(struct jump_label_key_deferred *key,
-		unsigned long rl);
+extern void
+jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
+
+#define STATIC_KEY_INIT_TRUE ((struct static_key) \
+	{ .enabled = ATOMIC_INIT(1), .entries = (void *)1 })
+#define STATIC_KEY_INIT_FALSE ((struct static_key) \
+	{ .enabled = ATOMIC_INIT(0), .entries = (void *)0 })
 
 #else  /* !HAVE_JUMP_LABEL */
 
 #include <linux/atomic.h>
 
-#define JUMP_LABEL_INIT {ATOMIC_INIT(0)}
-
-struct jump_label_key {
+struct static_key {
 	atomic_t enabled;
 };
 
@@ -78,30 +148,45 @@ static __always_inline void jump_label_init(void)
 {
 }
 
-struct jump_label_key_deferred {
-	struct jump_label_key key;
+struct static_key_deferred {
+	struct static_key key;
 };
 
-static __always_inline bool static_branch(struct jump_label_key *key)
+static __always_inline bool static_key_false(struct static_key *key)
+{
+	if (unlikely(atomic_read(&key->enabled) > 0))
+		return true;
+	return false;
+}
+
+static __always_inline bool static_key_true(struct static_key *key)
 {
-	if (unlikely(atomic_read(&key->enabled)))
+	if (likely(atomic_read(&key->enabled) > 0))
 		return true;
 	return false;
 }
 
-static inline void jump_label_inc(struct jump_label_key *key)
+/* Deprecated. Please use static_key_false() instead. */
+static __always_inline bool static_branch(struct static_key *key)
+{
+	if (unlikely(atomic_read(&key->enabled) > 0))
+		return true;
+	return false;
+}
+
+static inline void static_key_slow_inc(struct static_key *key)
 {
 	atomic_inc(&key->enabled);
 }
 
-static inline void jump_label_dec(struct jump_label_key *key)
+static inline void static_key_slow_dec(struct static_key *key)
 {
 	atomic_dec(&key->enabled);
 }
 
-static inline void jump_label_dec_deferred(struct jump_label_key_deferred *key)
+static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
 {
-	jump_label_dec(&key->key);
+	static_key_slow_dec(&key->key);
 }
 
 static inline int jump_label_text_reserved(void *start, void *end)
@@ -112,23 +197,30 @@ static inline int jump_label_text_reserved(void *start, void *end)
 static inline void jump_label_lock(void) {}
 static inline void jump_label_unlock(void) {}
 
-static inline bool jump_label_enabled(struct jump_label_key *key)
-{
-	return !!atomic_read(&key->enabled);
-}
-
 static inline int jump_label_apply_nops(struct module *mod)
 {
 	return 0;
 }
 
-static inline void jump_label_rate_limit(struct jump_label_key_deferred *key,
+static inline void
+jump_label_rate_limit(struct static_key_deferred *key,
 		unsigned long rl)
 {
 }
+
+#define STATIC_KEY_INIT_TRUE ((struct static_key) \
+		{ .enabled = ATOMIC_INIT(1) })
+#define STATIC_KEY_INIT_FALSE ((struct static_key) \
+		{ .enabled = ATOMIC_INIT(0) })
+
 #endif	/* HAVE_JUMP_LABEL */
 
-#define jump_label_key_enabled ((struct jump_label_key){ .enabled = ATOMIC_INIT(1), })
-#define jump_label_key_disabled ((struct jump_label_key){ .enabled = ATOMIC_INIT(0), })
+#define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
+#define jump_label_enabled static_key_enabled
+
+static inline bool static_key_enabled(struct static_key *key)
+{
+	return (atomic_read(&key->enabled) > 0);
+}
 
 #endif	/* _LINUX_JUMP_LABEL_H */
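
Taken together, the renamed API reads as in the following sketch (hypothetical key and function names, mirroring the usage block in the new header comment):

#include <linux/jump_label.h>

/* default-false key: the patched-in NOP/fallthrough is the common case */
static struct static_key sample_key = STATIC_KEY_INIT_FALSE;

void sample_hot_path(void)
{
	if (static_key_false(&sample_key)) {
		/* out-of-line slow path, reached only while enabled */
	}
}

/* slow-path control: each inc holds one reference on the key */
void sample_feature_enable(void)
{
	static_key_slow_inc(&sample_key);
}

void sample_feature_disable(void)
{
	static_key_slow_dec(&sample_key);
}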
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 0eac07c95255..7dfaae7846ab 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -214,8 +214,8 @@ enum {
 #include <linux/skbuff.h>
 
 #ifdef CONFIG_RPS
-#include <linux/jump_label.h>
-extern struct jump_label_key rps_needed;
+#include <linux/static_key.h>
+extern struct static_key rps_needed;
 #endif
 
 struct neighbour;
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index b809265607d0..29734be334c1 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -163,13 +163,13 @@ extern struct ctl_path nf_net_ipv4_netfilter_sysctl_path[];
 extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
 
 #if defined(CONFIG_JUMP_LABEL)
-#include <linux/jump_label.h>
-extern struct jump_label_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
+#include <linux/static_key.h>
+extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
 static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
 {
 	if (__builtin_constant_p(pf) &&
 	    __builtin_constant_p(hook))
-		return static_branch(&nf_hooks_needed[pf][hook]);
+		return static_key_false(&nf_hooks_needed[pf][hook]);
 
 	return !list_empty(&nf_hooks[pf][hook]);
 }
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index abb2776be1ba..bd9f55a5958d 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -129,11 +129,40 @@ enum perf_event_sample_format {
 	PERF_SAMPLE_PERIOD			= 1U << 8,
 	PERF_SAMPLE_STREAM_ID			= 1U << 9,
 	PERF_SAMPLE_RAW				= 1U << 10,
+	PERF_SAMPLE_BRANCH_STACK		= 1U << 11,
 
-	PERF_SAMPLE_MAX = 1U << 11,		/* non-ABI */
+	PERF_SAMPLE_MAX = 1U << 12,		/* non-ABI */
 };
 
 /*
+ * values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set
+ *
+ * If the user does not pass priv level information via branch_sample_type,
+ * the kernel uses the event's priv level. Branch and event priv levels do
+ * not have to match. Branch priv level is checked for permissions.
+ *
+ * The branch types can be combined, however BRANCH_ANY covers all types
+ * of branches and therefore it supersedes all the other types.
+ */
+enum perf_branch_sample_type {
+	PERF_SAMPLE_BRANCH_USER		= 1U << 0, /* user branches */
+	PERF_SAMPLE_BRANCH_KERNEL	= 1U << 1, /* kernel branches */
+	PERF_SAMPLE_BRANCH_HV		= 1U << 2, /* hypervisor branches */
+
+	PERF_SAMPLE_BRANCH_ANY		= 1U << 3, /* any branch types */
+	PERF_SAMPLE_BRANCH_ANY_CALL	= 1U << 4, /* any call branch */
+	PERF_SAMPLE_BRANCH_ANY_RETURN	= 1U << 5, /* any return branch */
+	PERF_SAMPLE_BRANCH_IND_CALL	= 1U << 6, /* indirect calls */
+
+	PERF_SAMPLE_BRANCH_MAX		= 1U << 7, /* non-ABI */
+};
+
+#define PERF_SAMPLE_BRANCH_PLM_ALL \
+	(PERF_SAMPLE_BRANCH_USER|\
+	 PERF_SAMPLE_BRANCH_KERNEL|\
+	 PERF_SAMPLE_BRANCH_HV)
+
+/*
  * The format of the data returned by read() on a perf event fd,
  * as specified by attr.read_format:
  *
@@ -163,6 +192,8 @@ enum perf_event_read_format {
 };
 
 #define PERF_ATTR_SIZE_VER0	64	/* sizeof first published struct */
+#define PERF_ATTR_SIZE_VER1	72	/* add: config2 */
+#define PERF_ATTR_SIZE_VER2	80	/* add: branch_sample_type */
 
 /*
  * Hardware event_id to monitor via a performance monitoring event:
@@ -240,6 +271,7 @@ struct perf_event_attr {
 		__u64		bp_len;
 		__u64		config2; /* extension of config1 */
 	};
+	__u64	branch_sample_type; /* enum perf_branch_sample_type */
 };
 
 /*
@@ -291,12 +323,14 @@ struct perf_event_mmap_page {
 	__s64	offset;			/* add to hardware event value */
 	__u64	time_enabled;		/* time event active */
 	__u64	time_running;		/* time event on cpu */
+	__u32	time_mult, time_shift;
+	__u64	time_offset;
 
 	/*
 	 * Hole for extension of the self monitor capabilities
 	 */
 
-	__u64	__reserved[123];	/* align to 1k */
+	__u64	__reserved[121];	/* align to 1k */
 
 	/*
 	 * Control data for the mmap() data buffer.
@@ -456,6 +490,8 @@ enum perf_event_type {
 	 *
 	 *	{ u32			size;
 	 *	  char			data[size];}&& PERF_SAMPLE_RAW
+	 *
+	 *	{ u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK
 	 * };
 	 */
 	PERF_RECORD_SAMPLE		= 9,
@@ -512,7 +548,7 @@ struct perf_guest_info_callbacks {
 #include <linux/ftrace.h>
 #include <linux/cpu.h>
 #include <linux/irq_work.h>
-#include <linux/jump_label.h>
+#include <linux/static_key.h>
 #include <linux/atomic.h>
 #include <asm/local.h>
 
@@ -528,12 +564,34 @@ struct perf_raw_record {
 	void				*data;
 };
 
+/*
+ * single taken branch record layout:
+ *
+ *      from: source instruction (may not always be a branch insn)
+ *        to: branch target
+ *   mispred: branch target was mispredicted
+ * predicted: branch target was predicted
+ *
+ * support for mispred, predicted is optional. In case it
+ * is not supported mispred = predicted = 0.
+ */
 struct perf_branch_entry {
 	__u64	from;
 	__u64	to;
-	__u64	flags;
+	__u64	mispred:1,  /* target mispredicted */
+		predicted:1,/* target predicted */
+		reserved:62;
 };
 
+/*
+ * branch stack layout:
+ *  nr: number of taken branches stored in entries[]
+ *
+ * Note that nr can vary from sample to sample
+ * branches (to, from) are stored from most recent
+ * to least recent, i.e., entries[0] contains the most
+ * recent branch.
+ */
 struct perf_branch_stack {
 	__u64				nr;
 	struct perf_branch_entry	entries[0];
@@ -564,7 +622,9 @@ struct hw_perf_event {
 		unsigned long	event_base;
 		int		idx;
 		int		last_cpu;
+
 		struct hw_perf_event_extra extra_reg;
+		struct hw_perf_event_extra branch_reg;
 	};
 	struct { /* software */
 		struct hrtimer	hrtimer;
@@ -616,6 +676,7 @@ struct pmu {
 	struct list_head		entry;
 
 	struct device			*dev;
+	const struct attribute_group	**attr_groups;
 	char				*name;
 	int				type;
 
@@ -681,6 +742,17 @@ struct pmu {
 	 * for each successful ->add() during the transaction.
 	 */
 	void (*cancel_txn)		(struct pmu *pmu); /* optional */
+
+	/*
+	 * Will return the value for perf_event_mmap_page::index for this event,
+	 * if no implementation is provided it will default to: event->hw.idx + 1.
+	 */
+	int (*event_idx)		(struct perf_event *event); /* optional */
+
+	/*
+	 * flush branch stack on context-switches (needed in cpu-wide mode)
+	 */
+	void (*flush_branch_stack)	(void);
 };
 
 /**
@@ -850,6 +922,9 @@ struct perf_event {
 #ifdef CONFIG_EVENT_TRACING
 	struct ftrace_event_call	*tp_event;
 	struct event_filter		*filter;
+#ifdef CONFIG_FUNCTION_TRACER
+	struct ftrace_ops		ftrace_ops;
+#endif
 #endif
 
 #ifdef CONFIG_CGROUP_PERF
@@ -911,7 +986,8 @@ struct perf_event_context {
 	u64				parent_gen;
 	u64				generation;
 	int				pin_count;
-	int				nr_cgroups;	 /* cgroup events present */
+	int				nr_cgroups;	 /* cgroup evts */
+	int				nr_branch_stack; /* branch_stack evt */
 	struct rcu_head			rcu_head;
 };
 
@@ -976,6 +1052,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr,
 extern u64 perf_event_read_value(struct perf_event *event,
 				 u64 *enabled, u64 *running);
 
+
 struct perf_sample_data {
 	u64				type;
 
@@ -995,12 +1072,14 @@ struct perf_sample_data {
 	u64				period;
 	struct perf_callchain_entry	*callchain;
 	struct perf_raw_record		*raw;
+	struct perf_branch_stack	*br_stack;
 };
 
 static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
 {
 	data->addr = addr;
 	data->raw  = NULL;
+	data->br_stack = NULL;
 }
 
 extern void perf_output_sample(struct perf_output_handle *handle,
@@ -1029,7 +1108,7 @@ static inline int is_software_event(struct perf_event *event)
 	return event->pmu->task_ctx_nr == perf_sw_context;
 }
 
-extern struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
+extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
 extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
 
@@ -1057,7 +1136,7 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 {
 	struct pt_regs hot_regs;
 
-	if (static_branch(&perf_swevent_enabled[event_id])) {
+	if (static_key_false(&perf_swevent_enabled[event_id])) {
 		if (!regs) {
 			perf_fetch_caller_regs(&hot_regs);
 			regs = &hot_regs;
@@ -1066,12 +1145,12 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 	}
 }
 
-extern struct jump_label_key_deferred perf_sched_events;
+extern struct static_key_deferred perf_sched_events;
 
 static inline void perf_event_task_sched_in(struct task_struct *prev,
 					    struct task_struct *task)
 {
-	if (static_branch(&perf_sched_events.key))
+	if (static_key_false(&perf_sched_events.key))
 		__perf_event_task_sched_in(prev, task);
 }
 
@@ -1080,7 +1159,7 @@ static inline void perf_event_task_sched_out(struct task_struct *prev,
 {
 	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
 
-	if (static_branch(&perf_sched_events.key))
+	if (static_key_false(&perf_sched_events.key))
 		__perf_event_task_sched_out(prev, next);
 }
 
@@ -1139,6 +1218,11 @@ extern void perf_bp_event(struct perf_event *event, void *data);
 # define perf_instruction_pointer(regs)	instruction_pointer(regs)
 #endif
 
+static inline bool has_branch_stack(struct perf_event *event)
+{
+	return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
+}
+
 extern int perf_output_begin(struct perf_output_handle *handle,
 			     struct perf_event *event, unsigned int size);
 extern void perf_output_end(struct perf_output_handle *handle);
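
On the userspace side, the new attr fields are exercised roughly as below - a sketch only (error handling trimmed; the perf_event_open() wrapper is the usual hand-rolled one, since libc exports no stub for the syscall):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int open_branch_sampling_event(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size          = sizeof(attr);	/* >= PERF_ATTR_SIZE_VER2 */
	attr.type          = PERF_TYPE_HARDWARE;
	attr.config        = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;
	attr.sample_type   = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK;
	/* any branch type; priv level falls back to the event's if unset */
	attr.branch_sample_type = PERF_SAMPLE_BRANCH_ANY;

	return perf_event_open(&attr, 0 /* self */, -1 /* any cpu */, -1, 0);
}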
diff --git a/include/linux/static_key.h b/include/linux/static_key.h
new file mode 100644
index 000000000000..27bd3f8a0857
--- /dev/null
+++ b/include/linux/static_key.h
@@ -0,0 +1 @@
+#include <linux/jump_label.h>
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index df0a779c1bbd..bd96ecd0e05c 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -17,7 +17,7 @@
 #include <linux/errno.h>
 #include <linux/types.h>
 #include <linux/rcupdate.h>
-#include <linux/jump_label.h>
+#include <linux/static_key.h>
 
 struct module;
 struct tracepoint;
@@ -29,7 +29,7 @@ struct tracepoint_func {
 
 struct tracepoint {
 	const char *name;		/* Tracepoint name */
-	struct jump_label_key key;
+	struct static_key key;
 	void (*regfunc)(void);
 	void (*unregfunc)(void);
 	struct tracepoint_func __rcu *funcs;
@@ -114,7 +114,7 @@ static inline void tracepoint_synchronize_unregister(void)
  * as "(void *, void)". The DECLARE_TRACE_NOARGS() will pass in just
  * "void *data", where as the DECLARE_TRACE() will pass in "void *data, proto".
  */
-#define __DO_TRACE(tp, proto, args, cond)				\
+#define __DO_TRACE(tp, proto, args, cond, prercu, postrcu)		\
 	do {								\
 		struct tracepoint_func *it_func_ptr;			\
 		void *it_func;						\
@@ -122,6 +122,7 @@ static inline void tracepoint_synchronize_unregister(void)
 									\
 		if (!(cond))						\
 			return;						\
+		prercu;							\
 		rcu_read_lock_sched_notrace();				\
 		it_func_ptr = rcu_dereference_sched((tp)->funcs);	\
 		if (it_func_ptr) {					\
@@ -132,6 +133,7 @@ static inline void tracepoint_synchronize_unregister(void)
 			} while ((++it_func_ptr)->func);		\
 		}							\
 		rcu_read_unlock_sched_notrace();			\
+		postrcu;						\
 	} while (0)
 
 /*
@@ -139,15 +141,25 @@ static inline void tracepoint_synchronize_unregister(void)
  * not add unwanted padding between the beginning of the section and the
  * structure. Force alignment to the same alignment as the section start.
  */
 #define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
 	extern struct tracepoint __tracepoint_##name;			\
 	static inline void trace_##name(proto)				\
 	{								\
+		if (static_key_false(&__tracepoint_##name.key))		\
+			__DO_TRACE(&__tracepoint_##name,		\
+				TP_PROTO(data_proto),			\
+				TP_ARGS(data_args),			\
+				TP_CONDITION(cond),,);			\
+	}								\
+	static inline void trace_##name##_rcuidle(proto)		\
+	{								\
 		if (static_branch(&__tracepoint_##name.key))		\
 			__DO_TRACE(&__tracepoint_##name,		\
 				TP_PROTO(data_proto),			\
 				TP_ARGS(data_args),			\
-				TP_CONDITION(cond));			\
+				TP_CONDITION(cond),			\
+				rcu_idle_exit(),			\
+				rcu_idle_enter());			\
 	}								\
 	static inline int						\
 	register_trace_##name(void (*probe)(data_proto), void *data)	\
@@ -176,7 +188,7 @@ static inline void tracepoint_synchronize_unregister(void)
 	__attribute__((section("__tracepoints_strings"))) = #name;	 \
 	struct tracepoint __tracepoint_##name				 \
 	__attribute__((section("__tracepoints"))) =			 \
-		{ __tpstrtab_##name, JUMP_LABEL_INIT, reg, unreg, NULL };\
+		{ __tpstrtab_##name, STATIC_KEY_INIT_FALSE, reg, unreg, NULL };\
 	static struct tracepoint * const __tracepoint_ptr_##name __used \
 	__attribute__((section("__tracepoints_ptrs"))) =		 \
 		&__tracepoint_##name;
@@ -190,9 +202,11 @@ static inline void tracepoint_synchronize_unregister(void)
 	EXPORT_SYMBOL(__tracepoint_##name)
 
 #else /* !CONFIG_TRACEPOINTS */
 #define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
 	static inline void trace_##name(proto)				\
 	{ }								\
+	static inline void trace_##name##_rcuidle(proto)		\
+	{ }								\
 	static inline int						\
 	register_trace_##name(void (*probe)(data_proto),		\
 			      void *data)				\
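
A sketch of when the generated _rcuidle variant is the right call (hypothetical tracepoint; the matching DEFINE_TRACE() would live in a .c file as usual):

#include <linux/tracepoint.h>

DECLARE_TRACE(sample_idle_step,
	TP_PROTO(int step),
	TP_ARGS(step));

static void sample_in_task_context(void)
{
	/* normal context: RCU is watching, the plain wrapper suffices */
	trace_sample_idle_step(0);
}

static void sample_in_idle_loop(void)
{
	/* idle loop: RCU is not watching, so rcu_read_lock_sched() inside
	 * __DO_TRACE() would be illegal; this variant brackets the probe
	 * calls with rcu_idle_exit()/rcu_idle_enter() */
	trace_sample_idle_step_rcuidle(1);
}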
diff --git a/include/net/sock.h b/include/net/sock.h
index 91c1c8baf020..dcde2d9268cd 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -55,7 +55,7 @@
 #include <linux/uaccess.h>
 #include <linux/memcontrol.h>
 #include <linux/res_counter.h>
-#include <linux/jump_label.h>
+#include <linux/static_key.h>
 
 #include <linux/filter.h>
 #include <linux/rculist_nulls.h>
@@ -924,13 +924,13 @@ inline void sk_refcnt_debug_release(const struct sock *sk)
 #endif /* SOCK_REFCNT_DEBUG */
 
 #if defined(CONFIG_CGROUP_MEM_RES_CTLR_KMEM) && defined(CONFIG_NET)
-extern struct jump_label_key memcg_socket_limit_enabled;
+extern struct static_key memcg_socket_limit_enabled;
 static inline struct cg_proto *parent_cg_proto(struct proto *proto,
 					       struct cg_proto *cg_proto)
 {
 	return proto->proto_cgroup(parent_mem_cgroup(cg_proto->memcg));
 }
-#define mem_cgroup_sockets_enabled static_branch(&memcg_socket_limit_enabled)
+#define mem_cgroup_sockets_enabled static_key_false(&memcg_socket_limit_enabled)
 #else
 #define mem_cgroup_sockets_enabled 0
 static inline struct cg_proto *parent_cg_proto(struct proto *proto,
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index 1bcc2a8c00e2..14b38940062b 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -151,6 +151,8 @@ enum {
 			      events get removed */
 static inline void trace_power_start(u64 type, u64 state, u64 cpuid) {};
 static inline void trace_power_end(u64 cpuid) {};
+static inline void trace_power_start_rcuidle(u64 type, u64 state, u64 cpuid) {};
+static inline void trace_power_end_rcuidle(u64 cpuid) {};
 static inline void trace_power_frequency(u64 type, u64 state, u64 cpuid) {};
 #endif /* _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED */
 
diff --git a/include/trace/events/printk.h b/include/trace/events/printk.h
new file mode 100644
index 000000000000..94ec79cc011a
--- /dev/null
+++ b/include/trace/events/printk.h
@@ -0,0 +1,41 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM printk
+
+#if !defined(_TRACE_PRINTK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PRINTK_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT_CONDITION(console,
+	TP_PROTO(const char *log_buf, unsigned start, unsigned end,
+		 unsigned log_buf_len),
+
+	TP_ARGS(log_buf, start, end, log_buf_len),
+
+	TP_CONDITION(start != end),
+
+	TP_STRUCT__entry(
+		__dynamic_array(char, msg, end - start + 1)
+	),
+
+	TP_fast_assign(
+		if ((start & (log_buf_len - 1)) > (end & (log_buf_len - 1))) {
+			memcpy(__get_dynamic_array(msg),
+			       log_buf + (start & (log_buf_len - 1)),
+			       log_buf_len - (start & (log_buf_len - 1)));
+			memcpy((char *)__get_dynamic_array(msg) +
+			       log_buf_len - (start & (log_buf_len - 1)),
+			       log_buf, end & (log_buf_len - 1));
+		} else
+			memcpy(__get_dynamic_array(msg),
+			       log_buf + (start & (log_buf_len - 1)),
+			       end - start);
+		((char *)__get_dynamic_array(msg))[end - start] = 0;
+	),
+
+	TP_printk("%s", __get_str(msg))
+);
+#endif /* _TRACE_PRINTK_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
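
The two-memcpy branch in TP_fast_assign() above is ordinary circular-buffer copying. A standalone equivalent, for reference (assumes log_buf_len is a power of two, which the kernel guarantees for the log buffer):

#include <string.h>

/* Copy the logical range [start, end) out of a power-of-two sized
 * circular buffer, handling the case where it wraps past the end. */
static void copy_log_range(char *dst, const char *log_buf,
			   unsigned start, unsigned end,
			   unsigned log_buf_len)
{
	unsigned s = start & (log_buf_len - 1);
	unsigned e = end & (log_buf_len - 1);

	if (s > e) {			/* wrapped: copy tail, then head */
		memcpy(dst, log_buf + s, log_buf_len - s);
		memcpy(dst + (log_buf_len - s), log_buf, e);
	} else {
		memcpy(dst, log_buf + s, end - start);
	}
	dst[end - start] = '\0';
}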
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index e33ed1bfa113..fbc7b1ad929b 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -6,6 +6,7 @@
 
 #include <linux/sched.h>
 #include <linux/tracepoint.h>
+#include <linux/binfmts.h>
 
 /*
  * Tracepoint for calling kthread_stop, performed to end a kthread:
@@ -276,6 +277,32 @@ TRACE_EVENT(sched_process_fork,
 );
 
 /*
+ * Tracepoint for exec:
+ */
+TRACE_EVENT(sched_process_exec,
+
+	TP_PROTO(struct task_struct *p, pid_t old_pid,
+		 struct linux_binprm *bprm),
+
+	TP_ARGS(p, old_pid, bprm),
+
+	TP_STRUCT__entry(
+		__string(	filename,	bprm->filename	)
+		__field(	pid_t,		pid		)
+		__field(	pid_t,		old_pid		)
+	),
+
+	TP_fast_assign(
+		__assign_str(filename, bprm->filename);
+		__entry->pid		= p->pid;
+		__entry->old_pid	= old_pid;
+	),
+
+	TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
+		  __entry->pid, __entry->old_pid)
+);
+
+/*
  * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
  *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
  */
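
A minimal consumer of the new tracepoint, as a sketch (hypothetical module; register_trace_sched_process_exec() is generated by the TRACE_EVENT() above):

#include <linux/module.h>
#include <linux/binfmts.h>
#include <trace/events/sched.h>

/* probe signature: the registration cookie first, then the TP_PROTO args */
static void sample_exec_probe(void *data, struct task_struct *p,
			      pid_t old_pid, struct linux_binprm *bprm)
{
	pr_info("exec %s: pid=%d old_pid=%d\n",
		bprm->filename, p->pid, old_pid);
}

static int __init sample_init(void)
{
	return register_trace_sched_process_exec(sample_exec_probe, NULL);
}

static void __exit sample_exit(void)
{
	unregister_trace_sched_process_exec(sample_exec_probe, NULL);
	tracepoint_synchronize_unregister();
}

module_init(sample_init);
module_exit(sample_exit);
MODULE_LICENSE("GPL");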
diff --git a/include/trace/events/signal.h b/include/trace/events/signal.h
index 17df43464df0..39a8a430d90f 100644
--- a/include/trace/events/signal.h
+++ b/include/trace/events/signal.h
@@ -23,11 +23,23 @@
 	}								\
 	} while (0)
 
+#ifndef TRACE_HEADER_MULTI_READ
+enum {
+	TRACE_SIGNAL_DELIVERED,
+	TRACE_SIGNAL_IGNORED,
+	TRACE_SIGNAL_ALREADY_PENDING,
+	TRACE_SIGNAL_OVERFLOW_FAIL,
+	TRACE_SIGNAL_LOSE_INFO,
+};
+#endif
+
 /**
  * signal_generate - called when a signal is generated
  * @sig: signal number
  * @info: pointer to struct siginfo
  * @task: pointer to struct task_struct
+ * @group: shared or private
+ * @result: TRACE_SIGNAL_*
  *
  * Current process sends a 'sig' signal to 'task' process with
  * 'info' siginfo. If 'info' is SEND_SIG_NOINFO or SEND_SIG_PRIV,
@@ -37,9 +49,10 @@
  */
 TRACE_EVENT(signal_generate,
 
-	TP_PROTO(int sig, struct siginfo *info, struct task_struct *task),
+	TP_PROTO(int sig, struct siginfo *info, struct task_struct *task,
+			int group, int result),
 
-	TP_ARGS(sig, info, task),
+	TP_ARGS(sig, info, task, group, result),
 
 	TP_STRUCT__entry(
 		__field(	int,	sig			)
@@ -47,6 +60,8 @@ TRACE_EVENT(signal_generate,
 		__field(	int,	code			)
 		__array(	char,	comm,	TASK_COMM_LEN	)
 		__field(	pid_t,	pid			)
+		__field(	int,	group			)
+		__field(	int,	result			)
 	),
 
 	TP_fast_assign(
@@ -54,11 +69,14 @@ TRACE_EVENT(signal_generate,
 		TP_STORE_SIGINFO(__entry, info);
 		memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
 		__entry->pid	= task->pid;
+		__entry->group	= group;
+		__entry->result	= result;
 	),
 
-	TP_printk("sig=%d errno=%d code=%d comm=%s pid=%d",
+	TP_printk("sig=%d errno=%d code=%d comm=%s pid=%d grp=%d res=%d",
 		  __entry->sig, __entry->errno, __entry->code,
-		  __entry->comm, __entry->pid)
+		  __entry->comm, __entry->pid, __entry->group,
+		  __entry->result)
 );
 
 /**
@@ -101,65 +119,6 @@ TRACE_EVENT(signal_deliver,
 		  __entry->sa_handler, __entry->sa_flags)
 );
 
-DECLARE_EVENT_CLASS(signal_queue_overflow,
-
-	TP_PROTO(int sig, int group, struct siginfo *info),
-
-	TP_ARGS(sig, group, info),
-
-	TP_STRUCT__entry(
-		__field(	int,	sig	)
-		__field(	int,	group	)
-		__field(	int,	errno	)
-		__field(	int,	code	)
-	),
-
-	TP_fast_assign(
-		__entry->sig	= sig;
-		__entry->group	= group;
-		TP_STORE_SIGINFO(__entry, info);
-	),
-
-	TP_printk("sig=%d group=%d errno=%d code=%d",
-		  __entry->sig, __entry->group, __entry->errno, __entry->code)
-);
-
-/**
- * signal_overflow_fail - called when signal queue is overflow
- * @sig: signal number
- * @group: signal to process group or not (bool)
- * @info: pointer to struct siginfo
- *
- * Kernel fails to generate 'sig' signal with 'info' siginfo, because
- * siginfo queue is overflow, and the signal is dropped.
- * 'group' is not 0 if the signal will be sent to a process group.
- * 'sig' is always one of RT signals.
- */
-DEFINE_EVENT(signal_queue_overflow, signal_overflow_fail,
-
-	TP_PROTO(int sig, int group, struct siginfo *info),
-
-	TP_ARGS(sig, group, info)
-);
-
-/**
- * signal_lose_info - called when siginfo is lost
- * @sig: signal number
- * @group: signal to process group or not (bool)
- * @info: pointer to struct siginfo
- *
- * Kernel generates 'sig' signal but loses 'info' siginfo, because siginfo
- * queue is overflow.
- * 'group' is not 0 if the signal will be sent to a process group.
- * 'sig' is always one of non-RT signals.
- */
-DEFINE_EVENT(signal_queue_overflow, signal_lose_info,
-
-	TP_PROTO(int sig, int group, struct siginfo *info),
-
-	TP_ARGS(sig, group, info)
-);
-
 #endif /* _TRACE_SIGNAL_H */
 
 /* This part must be outside protection */