Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/blktrace_api.h  | 172
-rw-r--r--  include/linux/compiler.h      |  84
-rw-r--r--  include/linux/ftrace.h        | 189
-rw-r--r--  include/linux/ftrace_irq.h    |  13
-rw-r--r--  include/linux/hardirq.h       |  15
-rw-r--r--  include/linux/marker.h        |  75
-rw-r--r--  include/linux/rcupdate.h      |   2
-rw-r--r--  include/linux/ring_buffer.h   |   1
-rw-r--r--  include/linux/sched.h         |  24
-rw-r--r--  include/linux/seq_file.h      |   1
-rw-r--r--  include/linux/stacktrace.h    |   8
-rw-r--r--  include/linux/tracepoint.h    |  57
-rw-r--r--  include/linux/tty.h           |   2
13 files changed, 413 insertions(+), 230 deletions(-)
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index bdf505d33e77..1dba3493d520 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -160,7 +160,6 @@ struct blk_trace {
160 160
161extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *); 161extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
162extern void blk_trace_shutdown(struct request_queue *); 162extern void blk_trace_shutdown(struct request_queue *);
163extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *);
164extern int do_blk_trace_setup(struct request_queue *q, 163extern int do_blk_trace_setup(struct request_queue *q,
165 char *name, dev_t dev, struct blk_user_trace_setup *buts); 164 char *name, dev_t dev, struct blk_user_trace_setup *buts);
166extern void __trace_note_message(struct blk_trace *, const char *fmt, ...); 165extern void __trace_note_message(struct blk_trace *, const char *fmt, ...);
@@ -186,168 +185,8 @@ extern void __trace_note_message(struct blk_trace *, const char *fmt, ...);
186 } while (0) 185 } while (0)
187#define BLK_TN_MAX_MSG 128 186#define BLK_TN_MAX_MSG 128
188 187
189/** 188extern void blk_add_driver_data(struct request_queue *q, struct request *rq,
190 * blk_add_trace_rq - Add a trace for a request oriented action 189 void *data, size_t len);
191 * @q: queue the io is for
192 * @rq: the source request
193 * @what: the action
194 *
195 * Description:
196 * Records an action against a request. Will log the bio offset + size.
197 *
198 **/
199static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq,
200 u32 what)
201{
202 struct blk_trace *bt = q->blk_trace;
203 int rw = rq->cmd_flags & 0x03;
204
205 if (likely(!bt))
206 return;
207
208 if (blk_discard_rq(rq))
209 rw |= (1 << BIO_RW_DISCARD);
210
211 if (blk_pc_request(rq)) {
212 what |= BLK_TC_ACT(BLK_TC_PC);
213 __blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd);
214 } else {
215 what |= BLK_TC_ACT(BLK_TC_FS);
216 __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, rw, what, rq->errors, 0, NULL);
217 }
218}
219
220/**
221 * blk_add_trace_bio - Add a trace for a bio oriented action
222 * @q: queue the io is for
223 * @bio: the source bio
224 * @what: the action
225 *
226 * Description:
227 * Records an action against a bio. Will log the bio offset + size.
228 *
229 **/
230static inline void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
231 u32 what)
232{
233 struct blk_trace *bt = q->blk_trace;
234
235 if (likely(!bt))
236 return;
237
238 __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
239}
240
241/**
242 * blk_add_trace_generic - Add a trace for a generic action
243 * @q: queue the io is for
244 * @bio: the source bio
245 * @rw: the data direction
246 * @what: the action
247 *
248 * Description:
249 * Records a simple trace
250 *
251 **/
252static inline void blk_add_trace_generic(struct request_queue *q,
253 struct bio *bio, int rw, u32 what)
254{
255 struct blk_trace *bt = q->blk_trace;
256
257 if (likely(!bt))
258 return;
259
260 if (bio)
261 blk_add_trace_bio(q, bio, what);
262 else
263 __blk_add_trace(bt, 0, 0, rw, what, 0, 0, NULL);
264}
265
266/**
267 * blk_add_trace_pdu_int - Add a trace for a bio with an integer payload
268 * @q: queue the io is for
269 * @what: the action
270 * @bio: the source bio
271 * @pdu: the integer payload
272 *
273 * Description:
274 * Adds a trace with some integer payload. This might be an unplug
275 * option given as the action, with the depth at unplug time given
276 * as the payload
277 *
278 **/
279static inline void blk_add_trace_pdu_int(struct request_queue *q, u32 what,
280 struct bio *bio, unsigned int pdu)
281{
282 struct blk_trace *bt = q->blk_trace;
283 __be64 rpdu = cpu_to_be64(pdu);
284
285 if (likely(!bt))
286 return;
287
288 if (bio)
289 __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), sizeof(rpdu), &rpdu);
290 else
291 __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
292}
293
294/**
295 * blk_add_trace_remap - Add a trace for a remap operation
296 * @q: queue the io is for
297 * @bio: the source bio
298 * @dev: target device
299 * @from: source sector
300 * @to: target sector
301 *
302 * Description:
303 * Device mapper or raid target sometimes need to split a bio because
304 * it spans a stripe (or similar). Add a trace for that action.
305 *
306 **/
307static inline void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
308 dev_t dev, sector_t from, sector_t to)
309{
310 struct blk_trace *bt = q->blk_trace;
311 struct blk_io_trace_remap r;
312
313 if (likely(!bt))
314 return;
315
316 r.device = cpu_to_be32(dev);
317 r.device_from = cpu_to_be32(bio->bi_bdev->bd_dev);
318 r.sector = cpu_to_be64(to);
319
320 __blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
321}
322
323/**
324 * blk_add_driver_data - Add binary message with driver-specific data
325 * @q: queue the io is for
326 * @rq: io request
327 * @data: driver-specific data
328 * @len: length of driver-specific data
329 *
330 * Description:
331 * Some drivers might want to write driver-specific data per request.
332 *
333 **/
334static inline void blk_add_driver_data(struct request_queue *q,
335 struct request *rq,
336 void *data, size_t len)
337{
338 struct blk_trace *bt = q->blk_trace;
339
340 if (likely(!bt))
341 return;
342
343 if (blk_pc_request(rq))
344 __blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA,
345 rq->errors, len, data);
346 else
347 __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
348 0, BLK_TA_DRV_DATA, rq->errors, len, data);
349}
350
351extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, 190extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
352 char __user *arg); 191 char __user *arg);
353extern int blk_trace_startstop(struct request_queue *q, int start); 192extern int blk_trace_startstop(struct request_queue *q, int start);
@@ -356,13 +195,8 @@ extern int blk_trace_remove(struct request_queue *q);
356#else /* !CONFIG_BLK_DEV_IO_TRACE */ 195#else /* !CONFIG_BLK_DEV_IO_TRACE */
357#define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY) 196#define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY)
358#define blk_trace_shutdown(q) do { } while (0) 197#define blk_trace_shutdown(q) do { } while (0)
359#define blk_add_trace_rq(q, rq, what) do { } while (0)
360#define blk_add_trace_bio(q, rq, what) do { } while (0)
361#define blk_add_trace_generic(q, rq, rw, what) do { } while (0)
362#define blk_add_trace_pdu_int(q, what, bio, pdu) do { } while (0)
363#define blk_add_trace_remap(q, bio, dev, f, t) do {} while (0)
364#define blk_add_driver_data(q, rq, data, len) do {} while (0)
365#define do_blk_trace_setup(q, name, dev, buts) (-ENOTTY) 198#define do_blk_trace_setup(q, name, dev, buts) (-ENOTTY)
199#define blk_add_driver_data(q, rq, data, len) do {} while (0)
366#define blk_trace_setup(q, name, dev, arg) (-ENOTTY) 200#define blk_trace_setup(q, name, dev, arg) (-ENOTTY)
367#define blk_trace_startstop(q, start) (-ENOTTY) 201#define blk_trace_startstop(q, start) (-ENOTTY)
368#define blk_trace_remove(q) (-ENOTTY) 202#define blk_trace_remove(q) (-ENOTTY)
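For illustration (not part of the patch above), a minimal sketch of how a block driver might use the blk_add_driver_data() declaration that replaces the removed inline helpers; the driver structure and field names are hypothetical:

#include <linux/blkdev.h>
#include <linux/blktrace_api.h>

/* Hypothetical per-command state kept by an example driver. */
struct mydrv_cmd {
        struct request *rq;
        u32 hw_status;          /* status word read back from hardware */
};

static void mydrv_complete_cmd(struct request_queue *q, struct mydrv_cmd *cmd)
{
        /*
         * Emit a BLK_TA_DRV_DATA event carrying the raw status word for
         * this request.  With CONFIG_BLK_DEV_IO_TRACE disabled the call
         * compiles away to the do {} while (0) stub defined above.
         */
        blk_add_driver_data(q, cmd->rq, &cmd->hw_status,
                            sizeof(cmd->hw_status));
}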
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 98115d9d04da..ea7c6be354b7 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -59,8 +59,88 @@ extern void __chk_io_ptr(const volatile void __iomem *);
59 * specific implementations come from the above header files 59 * specific implementations come from the above header files
60 */ 60 */
61 61
62#define likely(x) __builtin_expect(!!(x), 1) 62struct ftrace_branch_data {
63#define unlikely(x) __builtin_expect(!!(x), 0) 63 const char *func;
64 const char *file;
65 unsigned line;
66 union {
67 struct {
68 unsigned long correct;
69 unsigned long incorrect;
70 };
71 struct {
72 unsigned long miss;
73 unsigned long hit;
74 };
75 };
76};
77
78/*
79 * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
80 * to disable branch tracing on a per file basis.
81 */
82#if defined(CONFIG_TRACE_BRANCH_PROFILING) && !defined(DISABLE_BRANCH_PROFILING)
83void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
84
85#define likely_notrace(x) __builtin_expect(!!(x), 1)
86#define unlikely_notrace(x) __builtin_expect(!!(x), 0)
87
88#define __branch_check__(x, expect) ({ \
89 int ______r; \
90 static struct ftrace_branch_data \
91 __attribute__((__aligned__(4))) \
92 __attribute__((section("_ftrace_annotated_branch"))) \
93 ______f = { \
94 .func = __func__, \
95 .file = __FILE__, \
96 .line = __LINE__, \
97 }; \
98 ______r = likely_notrace(x); \
99 ftrace_likely_update(&______f, ______r, expect); \
100 ______r; \
101 })
102
103/*
104 * Using __builtin_constant_p(x) to ignore cases where the return
105 * value is always the same. This idea is taken from a similar patch
106 * written by Daniel Walker.
107 */
108# ifndef likely
109# define likely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1))
110# endif
111# ifndef unlikely
112# define unlikely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0))
113# endif
114
115#ifdef CONFIG_PROFILE_ALL_BRANCHES
116/*
117 * "Define 'is'", Bill Clinton
118 * "Define 'if'", Steven Rostedt
119 */
120#define if(cond) if (__builtin_constant_p((cond)) ? !!(cond) : \
121 ({ \
122 int ______r; \
123 static struct ftrace_branch_data \
124 __attribute__((__aligned__(4))) \
125 __attribute__((section("_ftrace_branch"))) \
126 ______f = { \
127 .func = __func__, \
128 .file = __FILE__, \
129 .line = __LINE__, \
130 }; \
131 ______r = !!(cond); \
132 if (______r) \
133 ______f.hit++; \
134 else \
135 ______f.miss++; \
136 ______r; \
137 }))
138#endif /* CONFIG_PROFILE_ALL_BRANCHES */
139
140#else
141# define likely(x) __builtin_expect(!!(x), 1)
142# define unlikely(x) __builtin_expect(!!(x), 0)
143#endif
64 144
65/* Optimization barrier */ 145/* Optimization barrier */
66#ifndef barrier 146#ifndef barrier
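As a sketch of the per-file opt-out mentioned in the comment above: a compilation unit that must not be instrumented (early boot code, or code reachable from the tracer itself) can define DISABLE_BRANCH_PROFILING before its includes, so likely()/unlikely() fall back to plain __builtin_expect(). The function below is only an illustration:

/* Must come before any header that pulls in <linux/compiler.h>. */
#define DISABLE_BRANCH_PROFILING

#include <linux/kernel.h>
#include <linux/errno.h>

static int early_parse_flag(const char *arg)
{
        /* This branch is not recorded in the _ftrace_annotated_branch section. */
        if (unlikely(!arg))
                return -EINVAL;
        return 0;
}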
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 703eb53cfa2b..afba918c623c 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -23,6 +23,45 @@ struct ftrace_ops {
23 struct ftrace_ops *next; 23 struct ftrace_ops *next;
24}; 24};
25 25
26extern int function_trace_stop;
27
28/*
29 * Type of the current tracing.
30 */
31enum ftrace_tracing_type_t {
32 FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
33 FTRACE_TYPE_RETURN, /* Hook the return of the function */
34};
35
36/* Current tracing type, default is FTRACE_TYPE_ENTER */
37extern enum ftrace_tracing_type_t ftrace_tracing_type;
38
39/**
40 * ftrace_stop - stop function tracer.
41 *
42 * A quick way to stop the function tracer. Note this an on off switch,
43 * it is not something that is recursive like preempt_disable.
44 * This does not disable the calling of mcount, it only stops the
45 * calling of functions from mcount.
46 */
47static inline void ftrace_stop(void)
48{
49 function_trace_stop = 1;
50}
51
52/**
53 * ftrace_start - start the function tracer.
54 *
55 * This function is the inverse of ftrace_stop. This does not enable
56 * the function tracing if the function tracer is disabled. This only
57 * sets the function tracer flag to continue calling the functions
58 * from mcount.
59 */
60static inline void ftrace_start(void)
61{
62 function_trace_stop = 0;
63}
64
26/* 65/*
27 * The ftrace_ops must be a static and should also 66 * The ftrace_ops must be a static and should also
28 * be read_mostly. These functions do modify read_mostly variables 67 * be read_mostly. These functions do modify read_mostly variables
@@ -41,9 +80,13 @@ extern void ftrace_stub(unsigned long a0, unsigned long a1);
41# define unregister_ftrace_function(ops) do { } while (0) 80# define unregister_ftrace_function(ops) do { } while (0)
42# define clear_ftrace_function(ops) do { } while (0) 81# define clear_ftrace_function(ops) do { } while (0)
43static inline void ftrace_kill(void) { } 82static inline void ftrace_kill(void) { }
83static inline void ftrace_stop(void) { }
84static inline void ftrace_start(void) { }
44#endif /* CONFIG_FUNCTION_TRACER */ 85#endif /* CONFIG_FUNCTION_TRACER */
45 86
46#ifdef CONFIG_DYNAMIC_FTRACE 87#ifdef CONFIG_DYNAMIC_FTRACE
88/* asm/ftrace.h must be defined for archs supporting dynamic ftrace */
89#include <asm/ftrace.h>
47 90
48enum { 91enum {
49 FTRACE_FL_FREE = (1 << 0), 92 FTRACE_FL_FREE = (1 << 0),
@@ -59,6 +102,7 @@ struct dyn_ftrace {
59 struct list_head list; 102 struct list_head list;
60 unsigned long ip; /* address of mcount call-site */ 103 unsigned long ip; /* address of mcount call-site */
61 unsigned long flags; 104 unsigned long flags;
105 struct dyn_arch_ftrace arch;
62}; 106};
63 107
64int ftrace_force_update(void); 108int ftrace_force_update(void);
@@ -66,19 +110,48 @@ void ftrace_set_filter(unsigned char *buf, int len, int reset);
66 110
67/* defined in arch */ 111/* defined in arch */
68extern int ftrace_ip_converted(unsigned long ip); 112extern int ftrace_ip_converted(unsigned long ip);
69extern unsigned char *ftrace_nop_replace(void);
70extern unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr);
71extern int ftrace_dyn_arch_init(void *data); 113extern int ftrace_dyn_arch_init(void *data);
72extern int ftrace_update_ftrace_func(ftrace_func_t func); 114extern int ftrace_update_ftrace_func(ftrace_func_t func);
73extern void ftrace_caller(void); 115extern void ftrace_caller(void);
74extern void ftrace_call(void); 116extern void ftrace_call(void);
75extern void mcount_call(void); 117extern void mcount_call(void);
118#ifdef CONFIG_FUNCTION_GRAPH_TRACER
119extern void ftrace_graph_caller(void);
120extern int ftrace_enable_ftrace_graph_caller(void);
121extern int ftrace_disable_ftrace_graph_caller(void);
122#else
123static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
124static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
125#endif
126
127/**
128 * ftrace_make_nop - convert code into top
129 * @mod: module structure if called by module load initialization
130 * @rec: the mcount call site record
131 * @addr: the address that the call site should be calling
132 *
133 * This is a very sensitive operation and great care needs
134 * to be taken by the arch. The operation should carefully
135 * read the location, check to see if what is read is indeed
136 * what we expect it to be, and then on success of the compare,
137 * it should write to the location.
138 *
139 * The code segment at @rec->ip should be a caller to @addr
140 *
141 * Return must be:
142 * 0 on success
143 * -EFAULT on error reading the location
144 * -EINVAL on a failed compare of the contents
145 * -EPERM on error writing to the location
146 * Any other value will be considered a failure.
147 */
148extern int ftrace_make_nop(struct module *mod,
149 struct dyn_ftrace *rec, unsigned long addr);
76 150
77/** 151/**
78 * ftrace_modify_code - modify code segment 152 * ftrace_make_call - convert a nop call site into a call to addr
79 * @ip: the address of the code segment 153 * @rec: the mcount call site record
80 * @old_code: the contents of what is expected to be there 154 * @addr: the address that the call site should call
81 * @new_code: the code to patch in
82 * 155 *
83 * This is a very sensitive operation and great care needs 156 * This is a very sensitive operation and great care needs
84 * to be taken by the arch. The operation should carefully 157 * to be taken by the arch. The operation should carefully
@@ -86,6 +159,8 @@ extern void mcount_call(void);
86 * what we expect it to be, and then on success of the compare, 159 * what we expect it to be, and then on success of the compare,
87 * it should write to the location. 160 * it should write to the location.
88 * 161 *
162 * The code segment at @rec->ip should be a nop
163 *
89 * Return must be: 164 * Return must be:
90 * 0 on success 165 * 0 on success
91 * -EFAULT on error reading the location 166 * -EFAULT on error reading the location
@@ -93,8 +168,11 @@ extern void mcount_call(void);
93 * -EPERM on error writing to the location 168 * -EPERM on error writing to the location
94 * Any other value will be considered a failure. 169 * Any other value will be considered a failure.
95 */ 170 */
96extern int ftrace_modify_code(unsigned long ip, unsigned char *old_code, 171extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
97 unsigned char *new_code); 172
173
174/* May be defined in arch */
175extern int ftrace_arch_read_dyn_info(char *buf, int size);
98 176
99extern int skip_trace(unsigned long ip); 177extern int skip_trace(unsigned long ip);
100 178
@@ -102,7 +180,6 @@ extern void ftrace_release(void *start, unsigned long size);
102 180
103extern void ftrace_disable_daemon(void); 181extern void ftrace_disable_daemon(void);
104extern void ftrace_enable_daemon(void); 182extern void ftrace_enable_daemon(void);
105
106#else 183#else
107# define skip_trace(ip) ({ 0; }) 184# define skip_trace(ip) ({ 0; })
108# define ftrace_force_update() ({ 0; }) 185# define ftrace_force_update() ({ 0; })
@@ -181,6 +258,12 @@ static inline void __ftrace_enabled_restore(int enabled)
181#endif 258#endif
182 259
183#ifdef CONFIG_TRACING 260#ifdef CONFIG_TRACING
261extern int ftrace_dump_on_oops;
262
263extern void tracing_start(void);
264extern void tracing_stop(void);
265extern void ftrace_off_permanent(void);
266
184extern void 267extern void
185ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3); 268ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
186 269
@@ -211,6 +294,9 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
211static inline int 294static inline int
212ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 0))); 295ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 0)));
213 296
297static inline void tracing_start(void) { }
298static inline void tracing_stop(void) { }
299static inline void ftrace_off_permanent(void) { }
214static inline int 300static inline int
215ftrace_printk(const char *fmt, ...) 301ftrace_printk(const char *fmt, ...)
216{ 302{
@@ -221,33 +307,86 @@ static inline void ftrace_dump(void) { }
221 307
222#ifdef CONFIG_FTRACE_MCOUNT_RECORD 308#ifdef CONFIG_FTRACE_MCOUNT_RECORD
223extern void ftrace_init(void); 309extern void ftrace_init(void);
224extern void ftrace_init_module(unsigned long *start, unsigned long *end); 310extern void ftrace_init_module(struct module *mod,
311 unsigned long *start, unsigned long *end);
225#else 312#else
226static inline void ftrace_init(void) { } 313static inline void ftrace_init(void) { }
227static inline void 314static inline void
228ftrace_init_module(unsigned long *start, unsigned long *end) { } 315ftrace_init_module(struct module *mod,
316 unsigned long *start, unsigned long *end) { }
229#endif 317#endif
230 318
319enum {
320 POWER_NONE = 0,
321 POWER_CSTATE = 1,
322 POWER_PSTATE = 2,
323};
231 324
232struct boot_trace { 325struct power_trace {
233 pid_t caller; 326#ifdef CONFIG_POWER_TRACER
234 char func[KSYM_NAME_LEN]; 327 ktime_t stamp;
235 int result; 328 ktime_t end;
236 unsigned long long duration; /* usecs */ 329 int type;
237 ktime_t calltime; 330 int state;
238 ktime_t rettime; 331#endif
239}; 332};
240 333
241#ifdef CONFIG_BOOT_TRACER 334#ifdef CONFIG_POWER_TRACER
242extern void trace_boot(struct boot_trace *it, initcall_t fn); 335extern void trace_power_start(struct power_trace *it, unsigned int type,
243extern void start_boot_trace(void); 336 unsigned int state);
244extern void stop_boot_trace(void); 337extern void trace_power_mark(struct power_trace *it, unsigned int type,
338 unsigned int state);
339extern void trace_power_end(struct power_trace *it);
245#else 340#else
246static inline void trace_boot(struct boot_trace *it, initcall_t fn) { } 341static inline void trace_power_start(struct power_trace *it, unsigned int type,
247static inline void start_boot_trace(void) { } 342 unsigned int state) { }
248static inline void stop_boot_trace(void) { } 343static inline void trace_power_mark(struct power_trace *it, unsigned int type,
344 unsigned int state) { }
345static inline void trace_power_end(struct power_trace *it) { }
249#endif 346#endif
250 347
251 348
349/*
350 * Structure that defines an entry function trace.
351 */
352struct ftrace_graph_ent {
353 unsigned long func; /* Current function */
354 int depth;
355};
356
357/*
358 * Structure that defines a return function trace.
359 */
360struct ftrace_graph_ret {
361 unsigned long func; /* Current function */
362 unsigned long long calltime;
363 unsigned long long rettime;
364 /* Number of functions that overran the depth limit for current task */
365 unsigned long overrun;
366 int depth;
367};
368
369#ifdef CONFIG_FUNCTION_GRAPH_TRACER
370#define FTRACE_RETFUNC_DEPTH 50
371#define FTRACE_RETSTACK_ALLOC_SIZE 32
372/* Type of the callback handlers for tracing function graph*/
373typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
374typedef void (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
375
376extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
377 trace_func_graph_ent_t entryfunc);
378
379/* The current handlers in use */
380extern trace_func_graph_ret_t ftrace_graph_return;
381extern trace_func_graph_ent_t ftrace_graph_entry;
382
383extern void unregister_ftrace_graph(void);
384
385extern void ftrace_graph_init_task(struct task_struct *t);
386extern void ftrace_graph_exit_task(struct task_struct *t);
387#else
388static inline void ftrace_graph_init_task(struct task_struct *t) { }
389static inline void ftrace_graph_exit_task(struct task_struct *t) { }
390#endif
252 391
253#endif /* _LINUX_FTRACE_H */ 392#endif /* _LINUX_FTRACE_H */
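To illustrate the new function-graph interface declared above, a minimal sketch of a module that registers an entry/return handler pair and keeps simple counters. This assumes CONFIG_FUNCTION_GRAPH_TRACER=y, is not the in-tree graph tracer, and the 1 ms threshold is arbitrary:

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ftrace.h>
#include <asm/atomic.h>

static atomic_t graph_calls = ATOMIC_INIT(0);
static atomic_t graph_slow  = ATOMIC_INIT(0);

/* Matches trace_func_graph_ent_t: called on function entry. */
static void demo_graph_entry(struct ftrace_graph_ent *ent)
{
        atomic_inc(&graph_calls);
}

/* Matches trace_func_graph_ret_t: called on function return. */
static void demo_graph_return(struct ftrace_graph_ret *ret)
{
        /* calltime/rettime are timestamps; overrun counts frames dropped
         * because the per-task return stack depth limit was exceeded. */
        if (ret->rettime - ret->calltime > 1000000ULL)
                atomic_inc(&graph_slow);
}

static int __init graph_demo_init(void)
{
        /* Argument order per the prototype above: return handler first. */
        return register_ftrace_graph(demo_graph_return, demo_graph_entry);
}

static void __exit graph_demo_exit(void)
{
        unregister_ftrace_graph();
        printk(KERN_INFO "graph demo: %d calls, %d over threshold\n",
               atomic_read(&graph_calls), atomic_read(&graph_slow));
}

module_init(graph_demo_init);
module_exit(graph_demo_exit);
MODULE_LICENSE("GPL");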
diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h
new file mode 100644
index 000000000000..366a054d0b05
--- /dev/null
+++ b/include/linux/ftrace_irq.h
@@ -0,0 +1,13 @@
1#ifndef _LINUX_FTRACE_IRQ_H
2#define _LINUX_FTRACE_IRQ_H
3
4
5#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_FUNCTION_GRAPH_TRACER)
6extern void ftrace_nmi_enter(void);
7extern void ftrace_nmi_exit(void);
8#else
9static inline void ftrace_nmi_enter(void) { }
10static inline void ftrace_nmi_exit(void) { }
11#endif
12
13#endif /* _LINUX_FTRACE_IRQ_H */
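These hooks let the tracing core know when a CPU is inside an NMI, which matters to dynamic ftrace because NMIs cannot take the locks that protect code patching. Purely as an illustrative sketch (the real implementations are per-architecture, e.g. arch/x86/kernel/ftrace.c; the helper name below is hypothetical), one plausible shape:

#include <linux/ftrace_irq.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/processor.h>

static atomic_t nmi_running = ATOMIC_INIT(0);

void ftrace_nmi_enter(void)
{
        atomic_inc(&nmi_running);
        smp_mb();               /* mark NMI entry before any traced code runs */
}

void ftrace_nmi_exit(void)
{
        smp_mb();               /* order NMI work before clearing the flag */
        atomic_dec(&nmi_running);
}

/* Hypothetical helper: the patching path polls this before touching text. */
void arch_ftrace_wait_for_nmis(void)
{
        while (atomic_read(&nmi_running))
                cpu_relax();
}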
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 181006cc94a0..89a56d79e4c6 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -4,6 +4,7 @@
4#include <linux/preempt.h> 4#include <linux/preempt.h>
5#include <linux/smp_lock.h> 5#include <linux/smp_lock.h>
6#include <linux/lockdep.h> 6#include <linux/lockdep.h>
7#include <linux/ftrace_irq.h>
7#include <asm/hardirq.h> 8#include <asm/hardirq.h>
8#include <asm/system.h> 9#include <asm/system.h>
9 10
@@ -161,7 +162,17 @@ extern void irq_enter(void);
161 */ 162 */
162extern void irq_exit(void); 163extern void irq_exit(void);
163 164
164#define nmi_enter() do { lockdep_off(); __irq_enter(); } while (0) 165#define nmi_enter() \
165#define nmi_exit() do { __irq_exit(); lockdep_on(); } while (0) 166 do { \
167 ftrace_nmi_enter(); \
168 lockdep_off(); \
169 __irq_enter(); \
170 } while (0)
171#define nmi_exit() \
172 do { \
173 __irq_exit(); \
174 lockdep_on(); \
175 ftrace_nmi_exit(); \
176 } while (0)
166 177
167#endif /* LINUX_HARDIRQ_H */ 178#endif /* LINUX_HARDIRQ_H */
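A small sketch of the calling convention the reworked macros expect from an architecture's NMI path; the handler name and the work inside it are hypothetical:

#include <linux/hardirq.h>

/* Called from the low-level NMI entry code of some architecture. */
void mydev_do_nmi(void)
{
        nmi_enter();            /* now also calls ftrace_nmi_enter() first */

        /* ... inspect status registers, record the event, etc. ... */

        nmi_exit();             /* ftrace_nmi_exit() runs last, mirroring entry */
}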
diff --git a/include/linux/marker.h b/include/linux/marker.h
index 889196c7fbb1..b85e74ca782f 100644
--- a/include/linux/marker.h
+++ b/include/linux/marker.h
@@ -12,6 +12,7 @@
12 * See the file COPYING for more details. 12 * See the file COPYING for more details.
13 */ 13 */
14 14
15#include <stdarg.h>
15#include <linux/types.h> 16#include <linux/types.h>
16 17
17struct module; 18struct module;
@@ -48,10 +49,28 @@ struct marker {
48 void (*call)(const struct marker *mdata, void *call_private, ...); 49 void (*call)(const struct marker *mdata, void *call_private, ...);
49 struct marker_probe_closure single; 50 struct marker_probe_closure single;
50 struct marker_probe_closure *multi; 51 struct marker_probe_closure *multi;
52 const char *tp_name; /* Optional tracepoint name */
53 void *tp_cb; /* Optional tracepoint callback */
51} __attribute__((aligned(8))); 54} __attribute__((aligned(8)));
52 55
53#ifdef CONFIG_MARKERS 56#ifdef CONFIG_MARKERS
54 57
58#define _DEFINE_MARKER(name, tp_name_str, tp_cb, format) \
59 static const char __mstrtab_##name[] \
60 __attribute__((section("__markers_strings"))) \
61 = #name "\0" format; \
62 static struct marker __mark_##name \
63 __attribute__((section("__markers"), aligned(8))) = \
64 { __mstrtab_##name, &__mstrtab_##name[sizeof(#name)], \
65 0, 0, marker_probe_cb, { __mark_empty_function, NULL},\
66 NULL, tp_name_str, tp_cb }
67
68#define DEFINE_MARKER(name, format) \
69 _DEFINE_MARKER(name, NULL, NULL, format)
70
71#define DEFINE_MARKER_TP(name, tp_name, tp_cb, format) \
72 _DEFINE_MARKER(name, #tp_name, tp_cb, format)
73
55/* 74/*
56 * Note : the empty asm volatile with read constraint is used here instead of a 75 * Note : the empty asm volatile with read constraint is used here instead of a
57 * "used" attribute to fix a gcc 4.1.x bug. 76 * "used" attribute to fix a gcc 4.1.x bug.
@@ -65,14 +84,7 @@ struct marker {
65 */ 84 */
66#define __trace_mark(generic, name, call_private, format, args...) \ 85#define __trace_mark(generic, name, call_private, format, args...) \
67 do { \ 86 do { \
68 static const char __mstrtab_##name[] \ 87 DEFINE_MARKER(name, format); \
69 __attribute__((section("__markers_strings"))) \
70 = #name "\0" format; \
71 static struct marker __mark_##name \
72 __attribute__((section("__markers"), aligned(8))) = \
73 { __mstrtab_##name, &__mstrtab_##name[sizeof(#name)], \
74 0, 0, marker_probe_cb, \
75 { __mark_empty_function, NULL}, NULL }; \
76 __mark_check_format(format, ## args); \ 88 __mark_check_format(format, ## args); \
77 if (unlikely(__mark_##name.state)) { \ 89 if (unlikely(__mark_##name.state)) { \
78 (*__mark_##name.call) \ 90 (*__mark_##name.call) \
@@ -80,14 +92,39 @@ struct marker {
80 } \ 92 } \
81 } while (0) 93 } while (0)
82 94
95#define __trace_mark_tp(name, call_private, tp_name, tp_cb, format, args...) \
96 do { \
97 void __check_tp_type(void) \
98 { \
99 register_trace_##tp_name(tp_cb); \
100 } \
101 DEFINE_MARKER_TP(name, tp_name, tp_cb, format); \
102 __mark_check_format(format, ## args); \
103 (*__mark_##name.call)(&__mark_##name, call_private, \
104 ## args); \
105 } while (0)
106
83extern void marker_update_probe_range(struct marker *begin, 107extern void marker_update_probe_range(struct marker *begin,
84 struct marker *end); 108 struct marker *end);
109
110#define GET_MARKER(name) (__mark_##name)
111
85#else /* !CONFIG_MARKERS */ 112#else /* !CONFIG_MARKERS */
113#define DEFINE_MARKER(name, tp_name, tp_cb, format)
86#define __trace_mark(generic, name, call_private, format, args...) \ 114#define __trace_mark(generic, name, call_private, format, args...) \
87 __mark_check_format(format, ## args) 115 __mark_check_format(format, ## args)
116#define __trace_mark_tp(name, call_private, tp_name, tp_cb, format, args...) \
117 do { \
118 void __check_tp_type(void) \
119 { \
120 register_trace_##tp_name(tp_cb); \
121 } \
122 __mark_check_format(format, ## args); \
123 } while (0)
88static inline void marker_update_probe_range(struct marker *begin, 124static inline void marker_update_probe_range(struct marker *begin,
89 struct marker *end) 125 struct marker *end)
90{ } 126{ }
127#define GET_MARKER(name)
91#endif /* CONFIG_MARKERS */ 128#endif /* CONFIG_MARKERS */
92 129
93/** 130/**
@@ -117,6 +154,20 @@ static inline void marker_update_probe_range(struct marker *begin,
117 __trace_mark(1, name, NULL, format, ## args) 154 __trace_mark(1, name, NULL, format, ## args)
118 155
119/** 156/**
157 * trace_mark_tp - Marker in a tracepoint callback
158 * @name: marker name, not quoted.
159 * @tp_name: tracepoint name, not quoted.
160 * @tp_cb: tracepoint callback. Should have an associated global symbol so it
161 * is not optimized away by the compiler (should not be static).
162 * @format: format string
163 * @args...: variable argument list
164 *
165 * Places a marker in a tracepoint callback.
166 */
167#define trace_mark_tp(name, tp_name, tp_cb, format, args...) \
168 __trace_mark_tp(name, NULL, tp_name, tp_cb, format, ## args)
169
170/**
120 * MARK_NOARGS - Format string for a marker with no argument. 171 * MARK_NOARGS - Format string for a marker with no argument.
121 */ 172 */
122#define MARK_NOARGS " " 173#define MARK_NOARGS " "
@@ -136,8 +187,6 @@ extern marker_probe_func __mark_empty_function;
136 187
137extern void marker_probe_cb(const struct marker *mdata, 188extern void marker_probe_cb(const struct marker *mdata,
138 void *call_private, ...); 189 void *call_private, ...);
139extern void marker_probe_cb_noarg(const struct marker *mdata,
140 void *call_private, ...);
141 190
142/* 191/*
143 * Connect a probe to a marker. 192 * Connect a probe to a marker.
@@ -162,8 +211,10 @@ extern void *marker_get_private_data(const char *name, marker_probe_func *probe,
162 211
163/* 212/*
164 * marker_synchronize_unregister must be called between the last marker probe 213 * marker_synchronize_unregister must be called between the last marker probe
165 * unregistration and the end of module exit to make sure there is no caller 214 * unregistration and the first one of
166 * executing a probe when it is freed. 215 * - the end of module exit function
216 * - the free of any resource used by the probes
217 * to ensure the code and data are valid for any possibly running probes.
167 */ 218 */
168#define marker_synchronize_unregister() synchronize_sched() 219#define marker_synchronize_unregister() synchronize_sched()
169 220
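For illustration, both marker flavours side by side: a plain trace_mark(), and the new trace_mark_tp(), which places a marker inside a tracepoint probe so that arming the marker also registers the probe. The marker names and the irq_entry tracepoint (assumed to be declared elsewhere with DECLARE_TRACE) are made up for this sketch:

#include <linux/module.h>
#include <linux/marker.h>

/* Plain marker: the format string documents the payload. */
static void handle_packet(int queue, int len)
{
        trace_mark(subsys_packet_rx, "queue %d len %d", queue, len);
}

/*
 * Marker fired from a tracepoint probe.  The probe must not be static,
 * per the trace_mark_tp() documentation above.
 */
void probe_irq_entry(unsigned int irq)
{
        trace_mark_tp(subsys_irq_entry, irq_entry, probe_irq_entry,
                      "irq %u", irq);
}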
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 86f1f5e43e33..895dc9c1088c 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -142,6 +142,7 @@ struct rcu_head {
142 * on the write-side to insure proper synchronization. 142 * on the write-side to insure proper synchronization.
143 */ 143 */
144#define rcu_read_lock_sched() preempt_disable() 144#define rcu_read_lock_sched() preempt_disable()
145#define rcu_read_lock_sched_notrace() preempt_disable_notrace()
145 146
146/* 147/*
147 * rcu_read_unlock_sched - marks the end of a RCU-classic critical section 148 * rcu_read_unlock_sched - marks the end of a RCU-classic critical section
@@ -149,6 +150,7 @@ struct rcu_head {
149 * See rcu_read_lock_sched for more information. 150 * See rcu_read_lock_sched for more information.
150 */ 151 */
151#define rcu_read_unlock_sched() preempt_enable() 152#define rcu_read_unlock_sched() preempt_enable()
153#define rcu_read_unlock_sched_notrace() preempt_enable_notrace()
152 154
153 155
154 156
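A short sketch of why the _notrace variants are introduced: read-side sections inside tracing code itself, where an instrumented preempt_disable()/preempt_enable() would recurse into the tracer. The array walk mirrors what __DO_TRACE() in tracepoint.h (further down) does; the data structure is illustrative:

#include <linux/rcupdate.h>

typedef void (*probe_func_t)(unsigned long arg);

/* NULL-terminated probe array, published with rcu_assign_pointer(). */
static probe_func_t *probes;

static void call_probes_notrace(unsigned long arg)
{
        probe_func_t *it;

        rcu_read_lock_sched_notrace();
        it = rcu_dereference(probes);
        if (it) {
                do {
                        (*it)(arg);
                } while (*(++it));
        }
        rcu_read_unlock_sched_notrace();
}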
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index e097c2e6b6dc..3bb87a753fa3 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -122,6 +122,7 @@ void ring_buffer_normalize_time_stamp(int cpu, u64 *ts);
122 122
123void tracing_on(void); 123void tracing_on(void);
124void tracing_off(void); 124void tracing_off(void);
125void tracing_off_permanent(void);
125 126
126enum ring_buffer_flags { 127enum ring_buffer_flags {
127 RB_FL_OVERWRITE = 1 << 0, 128 RB_FL_OVERWRITE = 1 << 0,
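As a usage sketch for the new export: a driver freezing the trace buffers when it hits an unrecoverable fault, so the events leading up to the fault are preserved for post-mortem analysis. Unlike tracing_off(), the permanent variant cannot be turned back on; the function and message below are hypothetical:

#include <linux/kernel.h>
#include <linux/ring_buffer.h>

static void mydev_fatal_error(int err)
{
        tracing_off_permanent();        /* stop all ring-buffer writers for good */
        printk(KERN_CRIT "mydev: fatal error %d, trace buffer frozen\n", err);
}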
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 55e30d114477..2d0a93c31228 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -96,6 +96,7 @@ struct exec_domain;
96struct futex_pi_state; 96struct futex_pi_state;
97struct robust_list_head; 97struct robust_list_head;
98struct bio; 98struct bio;
99struct bts_tracer;
99 100
100/* 101/*
101 * List of flags we want to share for kernel threads, 102 * List of flags we want to share for kernel threads,
@@ -1165,6 +1166,18 @@ struct task_struct {
1165 struct list_head ptraced; 1166 struct list_head ptraced;
1166 struct list_head ptrace_entry; 1167 struct list_head ptrace_entry;
1167 1168
1169#ifdef CONFIG_X86_PTRACE_BTS
1170 /*
1171 * This is the tracer handle for the ptrace BTS extension.
1172 * This field actually belongs to the ptracer task.
1173 */
1174 struct bts_tracer *bts;
1175 /*
1176 * The buffer to hold the BTS data.
1177 */
1178 void *bts_buffer;
1179#endif /* CONFIG_X86_PTRACE_BTS */
1180
1168 /* PID/PID hash table linkage. */ 1181 /* PID/PID hash table linkage. */
1169 struct pid_link pids[PIDTYPE_MAX]; 1182 struct pid_link pids[PIDTYPE_MAX];
1170 struct list_head thread_group; 1183 struct list_head thread_group;
@@ -1356,6 +1369,17 @@ struct task_struct {
1356 unsigned long default_timer_slack_ns; 1369 unsigned long default_timer_slack_ns;
1357 1370
1358 struct list_head *scm_work_list; 1371 struct list_head *scm_work_list;
1372#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1373 /* Index of current stored adress in ret_stack */
1374 int curr_ret_stack;
1375 /* Stack of return addresses for return function tracing */
1376 struct ftrace_ret_stack *ret_stack;
1377 /*
1378 * Number of functions that haven't been traced
1379 * because of depth overrun.
1380 */
1381 atomic_t trace_overrun;
1382#endif
1359}; 1383};
1360 1384
1361/* 1385/*
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index dc50bcc282a8..b3dfa72f13b9 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -34,6 +34,7 @@ struct seq_operations {
34 34
35#define SEQ_SKIP 1 35#define SEQ_SKIP 1
36 36
37char *mangle_path(char *s, char *p, char *esc);
37int seq_open(struct file *, const struct seq_operations *); 38int seq_open(struct file *, const struct seq_operations *);
38ssize_t seq_read(struct file *, char __user *, size_t, loff_t *); 39ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
39loff_t seq_lseek(struct file *, loff_t, int); 40loff_t seq_lseek(struct file *, loff_t, int);
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index b106fd8e0d5c..1a8cecc4f38c 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -15,9 +15,17 @@ extern void save_stack_trace_tsk(struct task_struct *tsk,
15 struct stack_trace *trace); 15 struct stack_trace *trace);
16 16
17extern void print_stack_trace(struct stack_trace *trace, int spaces); 17extern void print_stack_trace(struct stack_trace *trace, int spaces);
18
19#ifdef CONFIG_USER_STACKTRACE_SUPPORT
20extern void save_stack_trace_user(struct stack_trace *trace);
21#else
22# define save_stack_trace_user(trace) do { } while (0)
23#endif
24
18#else 25#else
19# define save_stack_trace(trace) do { } while (0) 26# define save_stack_trace(trace) do { } while (0)
20# define save_stack_trace_tsk(tsk, trace) do { } while (0) 27# define save_stack_trace_tsk(tsk, trace) do { } while (0)
28# define save_stack_trace_user(trace) do { } while (0)
21# define print_stack_trace(trace, spaces) do { } while (0) 29# define print_stack_trace(trace, spaces) do { } while (0)
22#endif 30#endif
23 31
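A sketch of capturing a user-space backtrace through the new hook; on configurations without CONFIG_USER_STACKTRACE_SUPPORT the call compiles to the empty stub above and nr_entries stays zero. The frame limit and function name are illustrative:

#include <linux/kernel.h>
#include <linux/stacktrace.h>

#define MAX_USER_FRAMES 16

static void dump_current_user_stack(void)
{
        unsigned long entries[MAX_USER_FRAMES];
        struct stack_trace trace = {
                .max_entries    = MAX_USER_FRAMES,
                .entries        = entries,
        };
        unsigned int i;

        save_stack_trace_user(&trace);          /* walks current's user stack */
        for (i = 0; i < trace.nr_entries; i++)
                printk(KERN_DEBUG "  [<%08lx>]\n", entries[i]);
}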
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index c5bb39c7a770..757005458366 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -24,8 +24,12 @@ struct tracepoint {
24 const char *name; /* Tracepoint name */ 24 const char *name; /* Tracepoint name */
25 int state; /* State. */ 25 int state; /* State. */
26 void **funcs; 26 void **funcs;
27} __attribute__((aligned(8))); 27} __attribute__((aligned(32))); /*
28 28 * Aligned on 32 bytes because it is
29 * globally visible and gcc happily
30 * align these on the structure size.
31 * Keep in sync with vmlinux.lds.h.
32 */
29 33
30#define TPPROTO(args...) args 34#define TPPROTO(args...) args
31#define TPARGS(args...) args 35#define TPARGS(args...) args
@@ -40,14 +44,14 @@ struct tracepoint {
40 do { \ 44 do { \
41 void **it_func; \ 45 void **it_func; \
42 \ 46 \
43 rcu_read_lock_sched(); \ 47 rcu_read_lock_sched_notrace(); \
44 it_func = rcu_dereference((tp)->funcs); \ 48 it_func = rcu_dereference((tp)->funcs); \
45 if (it_func) { \ 49 if (it_func) { \
46 do { \ 50 do { \
47 ((void(*)(proto))(*it_func))(args); \ 51 ((void(*)(proto))(*it_func))(args); \
48 } while (*(++it_func)); \ 52 } while (*(++it_func)); \
49 } \ 53 } \
50 rcu_read_unlock_sched(); \ 54 rcu_read_unlock_sched_notrace(); \
51 } while (0) 55 } while (0)
52 56
53/* 57/*
@@ -55,35 +59,40 @@ struct tracepoint {
55 * not add unwanted padding between the beginning of the section and the 59 * not add unwanted padding between the beginning of the section and the
56 * structure. Force alignment to the same alignment as the section start. 60 * structure. Force alignment to the same alignment as the section start.
57 */ 61 */
58#define DEFINE_TRACE(name, proto, args) \ 62#define DECLARE_TRACE(name, proto, args) \
63 extern struct tracepoint __tracepoint_##name; \
59 static inline void trace_##name(proto) \ 64 static inline void trace_##name(proto) \
60 { \ 65 { \
61 static const char __tpstrtab_##name[] \
62 __attribute__((section("__tracepoints_strings"))) \
63 = #name ":" #proto; \
64 static struct tracepoint __tracepoint_##name \
65 __attribute__((section("__tracepoints"), aligned(8))) = \
66 { __tpstrtab_##name, 0, NULL }; \
67 if (unlikely(__tracepoint_##name.state)) \ 66 if (unlikely(__tracepoint_##name.state)) \
68 __DO_TRACE(&__tracepoint_##name, \ 67 __DO_TRACE(&__tracepoint_##name, \
69 TPPROTO(proto), TPARGS(args)); \ 68 TPPROTO(proto), TPARGS(args)); \
70 } \ 69 } \
71 static inline int register_trace_##name(void (*probe)(proto)) \ 70 static inline int register_trace_##name(void (*probe)(proto)) \
72 { \ 71 { \
73 return tracepoint_probe_register(#name ":" #proto, \ 72 return tracepoint_probe_register(#name, (void *)probe); \
74 (void *)probe); \
75 } \ 73 } \
76 static inline void unregister_trace_##name(void (*probe)(proto))\ 74 static inline int unregister_trace_##name(void (*probe)(proto)) \
77 { \ 75 { \
78 tracepoint_probe_unregister(#name ":" #proto, \ 76 return tracepoint_probe_unregister(#name, (void *)probe);\
79 (void *)probe); \
80 } 77 }
81 78
79#define DEFINE_TRACE(name) \
80 static const char __tpstrtab_##name[] \
81 __attribute__((section("__tracepoints_strings"))) = #name; \
82 struct tracepoint __tracepoint_##name \
83 __attribute__((section("__tracepoints"), aligned(32))) = \
84 { __tpstrtab_##name, 0, NULL }
85
86#define EXPORT_TRACEPOINT_SYMBOL_GPL(name) \
87 EXPORT_SYMBOL_GPL(__tracepoint_##name)
88#define EXPORT_TRACEPOINT_SYMBOL(name) \
89 EXPORT_SYMBOL(__tracepoint_##name)
90
82extern void tracepoint_update_probe_range(struct tracepoint *begin, 91extern void tracepoint_update_probe_range(struct tracepoint *begin,
83 struct tracepoint *end); 92 struct tracepoint *end);
84 93
85#else /* !CONFIG_TRACEPOINTS */ 94#else /* !CONFIG_TRACEPOINTS */
86#define DEFINE_TRACE(name, proto, args) \ 95#define DECLARE_TRACE(name, proto, args) \
87 static inline void _do_trace_##name(struct tracepoint *tp, proto) \ 96 static inline void _do_trace_##name(struct tracepoint *tp, proto) \
88 { } \ 97 { } \
89 static inline void trace_##name(proto) \ 98 static inline void trace_##name(proto) \
@@ -92,8 +101,14 @@ extern void tracepoint_update_probe_range(struct tracepoint *begin,
92 { \ 101 { \
93 return -ENOSYS; \ 102 return -ENOSYS; \
94 } \ 103 } \
95 static inline void unregister_trace_##name(void (*probe)(proto))\ 104 static inline int unregister_trace_##name(void (*probe)(proto)) \
96 { } 105 { \
106 return -ENOSYS; \
107 }
108
109#define DEFINE_TRACE(name)
110#define EXPORT_TRACEPOINT_SYMBOL_GPL(name)
111#define EXPORT_TRACEPOINT_SYMBOL(name)
97 112
98static inline void tracepoint_update_probe_range(struct tracepoint *begin, 113static inline void tracepoint_update_probe_range(struct tracepoint *begin,
99 struct tracepoint *end) 114 struct tracepoint *end)
@@ -112,6 +127,10 @@ extern int tracepoint_probe_register(const char *name, void *probe);
112 */ 127 */
113extern int tracepoint_probe_unregister(const char *name, void *probe); 128extern int tracepoint_probe_unregister(const char *name, void *probe);
114 129
130extern int tracepoint_probe_register_noupdate(const char *name, void *probe);
131extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe);
132extern void tracepoint_probe_update_all(void);
133
115struct tracepoint_iter { 134struct tracepoint_iter {
116 struct module *module; 135 struct module *module;
117 struct tracepoint *tracepoint; 136 struct tracepoint *tracepoint;
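Finally, a sketch of the split this change introduces: DECLARE_TRACE() in a header only declares the tracepoint and its inline wrappers, while the storage is now emitted exactly once by DEFINE_TRACE() in a .c file and exported with EXPORT_TRACEPOINT_SYMBOL_GPL(). The event name, arguments and probe below are illustrative, and the pieces are shown in one file for brevity although they would normally be split between a header, the emitting .c file, and a probe module:

#include <linux/module.h>
#include <linux/tracepoint.h>

/* Normally in a header: declaration only, no storage. */
DECLARE_TRACE(mydrv_io_done,
        TPPROTO(int nr, int bytes),
        TPARGS(nr, bytes));

/* Normally in exactly one .c file: the single definition plus its export. */
DEFINE_TRACE(mydrv_io_done);
EXPORT_TRACEPOINT_SYMBOL_GPL(mydrv_io_done);

/* Instrumentation site: near-zero cost while no probe is registered. */
static void mydrv_complete(int nr, int bytes)
{
        trace_mydrv_io_done(nr, bytes);
}

/* Probe side, e.g. in a tracer module. */
static void probe_io_done(int nr, int bytes)
{
        /* runs with preemption disabled, see __DO_TRACE() above */
}

static int __init probe_demo_init(void)
{
        int ret = register_trace_mydrv_io_done(probe_io_done);

        if (!ret)
                mydrv_complete(1, 512);         /* exercise the tracepoint */
        return ret;
}

static void __exit probe_demo_exit(void)
{
        unregister_trace_mydrv_io_done(probe_io_done);
}

module_init(probe_demo_init);
module_exit(probe_demo_exit);
MODULE_LICENSE("GPL");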
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 3b8121d4e36f..eaec37c9d83d 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -325,7 +325,7 @@ extern struct class *tty_class;
325 * go away 325 * go away
326 */ 326 */
327 327
328extern inline struct tty_struct *tty_kref_get(struct tty_struct *tty) 328static inline struct tty_struct *tty_kref_get(struct tty_struct *tty)
329{ 329{
330 if (tty) 330 if (tty)
331 kref_get(&tty->kref); 331 kref_get(&tty->kref);