Diffstat (limited to 'include')
-rw-r--r--   include/asm-frv/ftrace.h                 1
-rw-r--r--   include/asm-generic/vmlinux.lds.h       11
-rw-r--r--   include/asm-m32r/ftrace.h                1
-rw-r--r--   include/asm-mn10300/ftrace.h             1
-rw-r--r--   include/linux/blktrace_api.h             5
-rw-r--r--   include/linux/ftrace.h                 222
-rw-r--r--   include/linux/ftrace_irq.h               2
-rw-r--r--   include/linux/hardirq.h                 73
-rw-r--r--   include/linux/kernel.h                  58
-rw-r--r--   include/linux/lockdep.h                 50
-rw-r--r--   include/linux/mutex.h                    5
-rw-r--r--   include/linux/ring_buffer.h             29
-rw-r--r--   include/linux/sched.h                   15
-rw-r--r--   include/linux/slab_def.h                68
-rw-r--r--   include/linux/slob_def.h                 9
-rw-r--r--   include/linux/slub_def.h                72
-rw-r--r--   include/linux/timer.h                   93
-rw-r--r--   include/linux/trace_clock.h             19
-rw-r--r--   include/linux/tracepoint.h               7
-rw-r--r--   include/trace/irq.h                      9
-rw-r--r--   include/trace/irq_event_types.h         33
-rw-r--r--   include/trace/kmemtrace.h               75
-rw-r--r--   include/trace/lockdep.h                  9
-rw-r--r--   include/trace/lockdep_event_types.h     44
-rw-r--r--   include/trace/power.h                   34
-rw-r--r--   include/trace/sched.h                   49
-rw-r--r--   include/trace/sched_event_types.h      151
-rw-r--r--   include/trace/trace_event_types.h        5
-rw-r--r--   include/trace/trace_events.h             5
-rw-r--r--   include/trace/workqueue.h               25
30 files changed, 919 insertions(+), 261 deletions(-)
diff --git a/include/asm-frv/ftrace.h b/include/asm-frv/ftrace.h
new file mode 100644
index 000000000000..40a8c178f10d
--- /dev/null
+++ b/include/asm-frv/ftrace.h
@@ -0,0 +1 @@
/* empty */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 5406e70aba86..9d974914e914 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -61,6 +61,14 @@
61#define BRANCH_PROFILE() 61#define BRANCH_PROFILE()
62#endif 62#endif
63 63
64#ifdef CONFIG_EVENT_TRACER
65#define FTRACE_EVENTS() VMLINUX_SYMBOL(__start_ftrace_events) = .; \
66 *(_ftrace_events) \
67 VMLINUX_SYMBOL(__stop_ftrace_events) = .;
68#else
69#define FTRACE_EVENTS()
70#endif
71
64/* .data section */ 72/* .data section */
65#define DATA_DATA \ 73#define DATA_DATA \
66 *(.data) \ 74 *(.data) \
@@ -81,7 +89,8 @@
81 *(__tracepoints) \ 89 *(__tracepoints) \
82 VMLINUX_SYMBOL(__stop___tracepoints) = .; \ 90 VMLINUX_SYMBOL(__stop___tracepoints) = .; \
83 LIKELY_PROFILE() \ 91 LIKELY_PROFILE() \
84 BRANCH_PROFILE() 92 BRANCH_PROFILE() \
93 FTRACE_EVENTS()
85 94
86#define RO_DATA(align) \ 95#define RO_DATA(align) \
87 . = ALIGN((align)); \ 96 . = ALIGN((align)); \
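The new FTRACE_EVENTS() macro gathers everything placed in the _ftrace_events section between the __start_ftrace_events and __stop_ftrace_events symbols. A minimal sketch of how the event tracer can then walk that range; the ftrace_event_call type and its name field are assumptions for illustration, not defined in this diff:

extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];

static void __init list_ftrace_events(void)
{
	struct ftrace_event_call *call;

	/* walk every descriptor the linker gathered into _ftrace_events */
	for (call = __start_ftrace_events; call < __stop_ftrace_events; call++)
		pr_info("ftrace event: %s\n", call->name);
}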
diff --git a/include/asm-m32r/ftrace.h b/include/asm-m32r/ftrace.h
new file mode 100644
index 000000000000..40a8c178f10d
--- /dev/null
+++ b/include/asm-m32r/ftrace.h
@@ -0,0 +1 @@
/* empty */
diff --git a/include/asm-mn10300/ftrace.h b/include/asm-mn10300/ftrace.h
new file mode 100644
index 000000000000..40a8c178f10d
--- /dev/null
+++ b/include/asm-mn10300/ftrace.h
@@ -0,0 +1 @@
/* empty */
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 6e915878e88c..d960889e92ef 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -144,6 +144,9 @@ struct blk_user_trace_setup {
144 144
145#ifdef __KERNEL__ 145#ifdef __KERNEL__
146#if defined(CONFIG_BLK_DEV_IO_TRACE) 146#if defined(CONFIG_BLK_DEV_IO_TRACE)
147
148#include <linux/sysfs.h>
149
147struct blk_trace { 150struct blk_trace {
148 int trace_state; 151 int trace_state;
149 struct rchan *rchan; 152 struct rchan *rchan;
@@ -194,6 +197,8 @@ extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
194extern int blk_trace_startstop(struct request_queue *q, int start); 197extern int blk_trace_startstop(struct request_queue *q, int start);
195extern int blk_trace_remove(struct request_queue *q); 198extern int blk_trace_remove(struct request_queue *q);
196 199
200extern struct attribute_group blk_trace_attr_group;
201
197#else /* !CONFIG_BLK_DEV_IO_TRACE */ 202#else /* !CONFIG_BLK_DEV_IO_TRACE */
198#define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY) 203#define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY)
199#define blk_trace_shutdown(q) do { } while (0) 204#define blk_trace_shutdown(q) do { } while (0)
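Exporting blk_trace_attr_group lets the block layer hang the blktrace control files off an existing sysfs object. A hedged sketch under the assumption that it is attached to a disk's kobject; add_blktrace_attrs() and the exact attach point are illustrative:

#include <linux/genhd.h>
#include <linux/sysfs.h>

static int add_blktrace_attrs(struct gendisk *disk)
{
	/* expose the blktrace attributes under the disk's sysfs directory */
	return sysfs_create_group(&disk_to_dev(disk)->kobj,
				  &blk_trace_attr_group);
}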
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 677432b9cb7e..498769425eb2 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -1,15 +1,18 @@
1#ifndef _LINUX_FTRACE_H 1#ifndef _LINUX_FTRACE_H
2#define _LINUX_FTRACE_H 2#define _LINUX_FTRACE_H
3 3
4#include <linux/linkage.h> 4#include <linux/trace_clock.h>
5#include <linux/fs.h>
6#include <linux/ktime.h>
7#include <linux/init.h>
8#include <linux/types.h>
9#include <linux/module.h>
10#include <linux/kallsyms.h> 5#include <linux/kallsyms.h>
6#include <linux/linkage.h>
11#include <linux/bitops.h> 7#include <linux/bitops.h>
8#include <linux/module.h>
9#include <linux/ktime.h>
12#include <linux/sched.h> 10#include <linux/sched.h>
11#include <linux/types.h>
12#include <linux/init.h>
13#include <linux/fs.h>
14
15#include <asm/ftrace.h>
13 16
14#ifdef CONFIG_FUNCTION_TRACER 17#ifdef CONFIG_FUNCTION_TRACER
15 18
@@ -95,9 +98,41 @@ stack_trace_sysctl(struct ctl_table *table, int write,
95 loff_t *ppos); 98 loff_t *ppos);
96#endif 99#endif
97 100
101struct ftrace_func_command {
102 struct list_head list;
103 char *name;
104 int (*func)(char *func, char *cmd,
105 char *params, int enable);
106};
107
98#ifdef CONFIG_DYNAMIC_FTRACE 108#ifdef CONFIG_DYNAMIC_FTRACE
99/* asm/ftrace.h must be defined for archs supporting dynamic ftrace */ 109
100#include <asm/ftrace.h> 110int ftrace_arch_code_modify_prepare(void);
111int ftrace_arch_code_modify_post_process(void);
112
113struct seq_file;
114
115struct ftrace_probe_ops {
116 void (*func)(unsigned long ip,
117 unsigned long parent_ip,
118 void **data);
119 int (*callback)(unsigned long ip, void **data);
120 void (*free)(void **data);
121 int (*print)(struct seq_file *m,
122 unsigned long ip,
123 struct ftrace_probe_ops *ops,
124 void *data);
125};
126
127extern int
128register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
129 void *data);
130extern void
131unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
132 void *data);
133extern void
134unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
135extern void unregister_ftrace_function_probe_all(char *glob);
101 136
102enum { 137enum {
103 FTRACE_FL_FREE = (1 << 0), 138 FTRACE_FL_FREE = (1 << 0),
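struct ftrace_func_command, together with register_ftrace_command()/unregister_ftrace_command() declared further down in this file, lets a tracer plug its own command into the function-filter interface. A minimal sketch; the command name and handler body are made up for illustration:

static int my_cmd_func(char *func, char *cmd, char *params, int enable)
{
	/* called when this command is parsed from the function filter interface */
	pr_info("cmd %s on %s params %s enable %d\n", cmd, func, params, enable);
	return 0;
}

static struct ftrace_func_command my_cmd = {
	.name	= "mycmd",
	.func	= my_cmd_func,
};

static int __init my_cmd_init(void)
{
	return register_ftrace_command(&my_cmd);
}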
@@ -119,6 +154,9 @@ struct dyn_ftrace {
119int ftrace_force_update(void); 154int ftrace_force_update(void);
120void ftrace_set_filter(unsigned char *buf, int len, int reset); 155void ftrace_set_filter(unsigned char *buf, int len, int reset);
121 156
157int register_ftrace_command(struct ftrace_func_command *cmd);
158int unregister_ftrace_command(struct ftrace_func_command *cmd);
159
122/* defined in arch */ 160/* defined in arch */
123extern int ftrace_ip_converted(unsigned long ip); 161extern int ftrace_ip_converted(unsigned long ip);
124extern int ftrace_dyn_arch_init(void *data); 162extern int ftrace_dyn_arch_init(void *data);
@@ -126,6 +164,10 @@ extern int ftrace_update_ftrace_func(ftrace_func_t func);
126extern void ftrace_caller(void); 164extern void ftrace_caller(void);
127extern void ftrace_call(void); 165extern void ftrace_call(void);
128extern void mcount_call(void); 166extern void mcount_call(void);
167
168#ifndef FTRACE_ADDR
169#define FTRACE_ADDR ((unsigned long)ftrace_caller)
170#endif
129#ifdef CONFIG_FUNCTION_GRAPH_TRACER 171#ifdef CONFIG_FUNCTION_GRAPH_TRACER
130extern void ftrace_graph_caller(void); 172extern void ftrace_graph_caller(void);
131extern int ftrace_enable_ftrace_graph_caller(void); 173extern int ftrace_enable_ftrace_graph_caller(void);
@@ -136,7 +178,7 @@ static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
136#endif 178#endif
137 179
138/** 180/**
139 * ftrace_make_nop - convert code into top 181 * ftrace_make_nop - convert code into nop
140 * @mod: module structure if called by module load initialization 182 * @mod: module structure if called by module load initialization
141 * @rec: the mcount call site record 183 * @rec: the mcount call site record
142 * @addr: the address that the call site should be calling 184 * @addr: the address that the call site should be calling
@@ -198,6 +240,14 @@ extern void ftrace_enable_daemon(void);
198# define ftrace_disable_daemon() do { } while (0) 240# define ftrace_disable_daemon() do { } while (0)
199# define ftrace_enable_daemon() do { } while (0) 241# define ftrace_enable_daemon() do { } while (0)
200static inline void ftrace_release(void *start, unsigned long size) { } 242static inline void ftrace_release(void *start, unsigned long size) { }
243static inline int register_ftrace_command(struct ftrace_func_command *cmd)
244{
245 return -EINVAL;
246}
247static inline int unregister_ftrace_command(char *cmd_name)
248{
249 return -EINVAL;
250}
201#endif /* CONFIG_DYNAMIC_FTRACE */ 251#endif /* CONFIG_DYNAMIC_FTRACE */
202 252
203/* totally disable ftrace - can not re-enable after this */ 253/* totally disable ftrace - can not re-enable after this */
@@ -233,24 +283,25 @@ static inline void __ftrace_enabled_restore(int enabled)
233#endif 283#endif
234} 284}
235 285
236#ifdef CONFIG_FRAME_POINTER 286#ifndef HAVE_ARCH_CALLER_ADDR
237/* TODO: need to fix this for ARM */ 287# ifdef CONFIG_FRAME_POINTER
238# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) 288# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
239# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1)) 289# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
240# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2)) 290# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
241# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3)) 291# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
242# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4)) 292# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
243# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5)) 293# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
244# define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6)) 294# define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
245#else 295# else
246# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) 296# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
247# define CALLER_ADDR1 0UL 297# define CALLER_ADDR1 0UL
248# define CALLER_ADDR2 0UL 298# define CALLER_ADDR2 0UL
249# define CALLER_ADDR3 0UL 299# define CALLER_ADDR3 0UL
250# define CALLER_ADDR4 0UL 300# define CALLER_ADDR4 0UL
251# define CALLER_ADDR5 0UL 301# define CALLER_ADDR5 0UL
252# define CALLER_ADDR6 0UL 302# define CALLER_ADDR6 0UL
253#endif 303# endif
304#endif /* ifndef HAVE_ARCH_CALLER_ADDR */
254 305
255#ifdef CONFIG_IRQSOFF_TRACER 306#ifdef CONFIG_IRQSOFF_TRACER
256 extern void time_hardirqs_on(unsigned long a0, unsigned long a1); 307 extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
@@ -268,54 +319,6 @@ static inline void __ftrace_enabled_restore(int enabled)
268# define trace_preempt_off(a0, a1) do { } while (0) 319# define trace_preempt_off(a0, a1) do { } while (0)
269#endif 320#endif
270 321
271#ifdef CONFIG_TRACING
272extern int ftrace_dump_on_oops;
273
274extern void tracing_start(void);
275extern void tracing_stop(void);
276extern void ftrace_off_permanent(void);
277
278extern void
279ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
280
281/**
282 * ftrace_printk - printf formatting in the ftrace buffer
283 * @fmt: the printf format for printing
284 *
285 * Note: __ftrace_printk is an internal function for ftrace_printk and
286 * the @ip is passed in via the ftrace_printk macro.
287 *
288 * This function allows a kernel developer to debug fast path sections
289 * that printk is not appropriate for. By scattering in various
290 * printk like tracing in the code, a developer can quickly see
291 * where problems are occurring.
292 *
293 * This is intended as a debugging tool for the developer only.
294 * Please refrain from leaving ftrace_printks scattered around in
295 * your code.
296 */
297# define ftrace_printk(fmt...) __ftrace_printk(_THIS_IP_, fmt)
298extern int
299__ftrace_printk(unsigned long ip, const char *fmt, ...)
300 __attribute__ ((format (printf, 2, 3)));
301extern void ftrace_dump(void);
302#else
303static inline void
304ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
305static inline int
306ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2)));
307
308static inline void tracing_start(void) { }
309static inline void tracing_stop(void) { }
310static inline void ftrace_off_permanent(void) { }
311static inline int
312ftrace_printk(const char *fmt, ...)
313{
314 return 0;
315}
316static inline void ftrace_dump(void) { }
317#endif
318
319#ifdef CONFIG_FTRACE_MCOUNT_RECORD 322#ifdef CONFIG_FTRACE_MCOUNT_RECORD
320extern void ftrace_init(void); 323extern void ftrace_init(void);
321extern void ftrace_init_module(struct module *mod, 324extern void ftrace_init_module(struct module *mod,
@@ -327,36 +330,6 @@ ftrace_init_module(struct module *mod,
327 unsigned long *start, unsigned long *end) { } 330 unsigned long *start, unsigned long *end) { }
328#endif 331#endif
329 332
330enum {
331 POWER_NONE = 0,
332 POWER_CSTATE = 1,
333 POWER_PSTATE = 2,
334};
335
336struct power_trace {
337#ifdef CONFIG_POWER_TRACER
338 ktime_t stamp;
339 ktime_t end;
340 int type;
341 int state;
342#endif
343};
344
345#ifdef CONFIG_POWER_TRACER
346extern void trace_power_start(struct power_trace *it, unsigned int type,
347 unsigned int state);
348extern void trace_power_mark(struct power_trace *it, unsigned int type,
349 unsigned int state);
350extern void trace_power_end(struct power_trace *it);
351#else
352static inline void trace_power_start(struct power_trace *it, unsigned int type,
353 unsigned int state) { }
354static inline void trace_power_mark(struct power_trace *it, unsigned int type,
355 unsigned int state) { }
356static inline void trace_power_end(struct power_trace *it) { }
357#endif
358
359
360/* 333/*
361 * Structure that defines an entry function trace. 334 * Structure that defines an entry function trace.
362 */ 335 */
@@ -380,6 +353,30 @@ struct ftrace_graph_ret {
380#ifdef CONFIG_FUNCTION_GRAPH_TRACER 353#ifdef CONFIG_FUNCTION_GRAPH_TRACER
381 354
382/* 355/*
356 * Stack of return addresses for functions
357 * of a thread.
358 * Used in struct thread_info
359 */
360struct ftrace_ret_stack {
361 unsigned long ret;
362 unsigned long func;
363 unsigned long long calltime;
364};
365
366/*
367 * Primary handler of a function return.
368 * It relays on ftrace_return_to_handler.
369 * Defined in entry_32/64.S
370 */
371extern void return_to_handler(void);
372
373extern int
374ftrace_push_return_trace(unsigned long ret, unsigned long long time,
375 unsigned long func, int *depth);
376extern void
377ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret);
378
379/*
383 * Sometimes we don't want to trace a function with the function 380 * Sometimes we don't want to trace a function with the function
384 * graph tracer but we want them to keep traced by the usual function 381 * graph tracer but we want them to keep traced by the usual function
385 * tracer if the function graph tracer is not configured. 382 * tracer if the function graph tracer is not configured.
@@ -490,6 +487,21 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk)
490 return tsk->trace & TSK_TRACE_FL_GRAPH; 487 return tsk->trace & TSK_TRACE_FL_GRAPH;
491} 488}
492 489
490extern int ftrace_dump_on_oops;
491
493#endif /* CONFIG_TRACING */ 492#endif /* CONFIG_TRACING */
494 493
494
495#ifdef CONFIG_HW_BRANCH_TRACER
496
497void trace_hw_branch(u64 from, u64 to);
498void trace_hw_branch_oops(void);
499
500#else /* CONFIG_HW_BRANCH_TRACER */
501
502static inline void trace_hw_branch(u64 from, u64 to) {}
503static inline void trace_hw_branch_oops(void) {}
504
505#endif /* CONFIG_HW_BRANCH_TRACER */
506
495#endif /* _LINUX_FTRACE_H */ 507#endif /* _LINUX_FTRACE_H */
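return_to_handler, ftrace_push_return_trace() and ftrace_pop_return_trace() above are the pieces an architecture wires together for the function graph tracer. A simplified, hedged sketch of the entry-side hook, loosely modelled on the x86 prepare_ftrace_return(); the error handling and the use of trace_clock_local() as the timestamp source are assumptions:

void notrace prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old = *parent;		/* real return address */
	int depth;

	/* record the call; give up if the per-task return stack is full */
	if (ftrace_push_return_trace(old, trace_clock_local(),
				     self_addr, &depth) == -EBUSY)
		return;

	/* hijack the return so function exit runs return_to_handler */
	*parent = (unsigned long)&return_to_handler;
}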
diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h
index 366a054d0b05..dca7bf8cffe2 100644
--- a/include/linux/ftrace_irq.h
+++ b/include/linux/ftrace_irq.h
@@ -2,7 +2,7 @@
2#define _LINUX_FTRACE_IRQ_H 2#define _LINUX_FTRACE_IRQ_H
3 3
4 4
5#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_FUNCTION_GRAPH_TRACER) 5#ifdef CONFIG_FTRACE_NMI_ENTER
6extern void ftrace_nmi_enter(void); 6extern void ftrace_nmi_enter(void);
7extern void ftrace_nmi_exit(void); 7extern void ftrace_nmi_exit(void);
8#else 8#else
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index f83288347dda..faa1cf848bcd 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -15,55 +15,61 @@
15 * - bits 0-7 are the preemption count (max preemption depth: 256) 15 * - bits 0-7 are the preemption count (max preemption depth: 256)
16 * - bits 8-15 are the softirq count (max # of softirqs: 256) 16 * - bits 8-15 are the softirq count (max # of softirqs: 256)
17 * 17 *
18 * The hardirq count can be overridden per architecture, the default is: 18 * The hardirq count can in theory reach the same as NR_IRQS.
19 * In reality, the number of nested IRQS is limited to the stack
20 * size as well. For archs with over 1000 IRQS it is not practical
21 * to expect that they will all nest. We give a max of 10 bits for
22 * hardirq nesting. An arch may choose to give less than 10 bits.
23 * m68k expects it to be 8.
19 * 24 *
20 * - bits 16-27 are the hardirq count (max # of hardirqs: 4096) 25 * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024)
21 * - ( bit 28 is the PREEMPT_ACTIVE flag. ) 26 * - bit 26 is the NMI_MASK
27 * - bit 28 is the PREEMPT_ACTIVE flag
22 * 28 *
23 * PREEMPT_MASK: 0x000000ff 29 * PREEMPT_MASK: 0x000000ff
24 * SOFTIRQ_MASK: 0x0000ff00 30 * SOFTIRQ_MASK: 0x0000ff00
25 * HARDIRQ_MASK: 0x0fff0000 31 * HARDIRQ_MASK: 0x03ff0000
32 * NMI_MASK: 0x04000000
26 */ 33 */
27#define PREEMPT_BITS 8 34#define PREEMPT_BITS 8
28#define SOFTIRQ_BITS 8 35#define SOFTIRQ_BITS 8
36#define NMI_BITS 1
29 37
30#ifndef HARDIRQ_BITS 38#define MAX_HARDIRQ_BITS 10
31#define HARDIRQ_BITS 12
32 39
33#ifndef MAX_HARDIRQS_PER_CPU 40#ifndef HARDIRQ_BITS
34#define MAX_HARDIRQS_PER_CPU NR_IRQS 41# define HARDIRQ_BITS MAX_HARDIRQ_BITS
35#endif 42#endif
36 43
37/* 44#if HARDIRQ_BITS > MAX_HARDIRQ_BITS
38 * The hardirq mask has to be large enough to have space for potentially 45#error HARDIRQ_BITS too high!
39 * all IRQ sources in the system nesting on a single CPU.
40 */
41#if (1 << HARDIRQ_BITS) < MAX_HARDIRQS_PER_CPU
42# error HARDIRQ_BITS is too low!
43#endif
44#endif 46#endif
45 47
46#define PREEMPT_SHIFT 0 48#define PREEMPT_SHIFT 0
47#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS) 49#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
48#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS) 50#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
51#define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS)
49 52
50#define __IRQ_MASK(x) ((1UL << (x))-1) 53#define __IRQ_MASK(x) ((1UL << (x))-1)
51 54
52#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT) 55#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
53#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) 56#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
54#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT) 57#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
58#define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
55 59
56#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT) 60#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
57#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT) 61#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
58#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) 62#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
63#define NMI_OFFSET (1UL << NMI_SHIFT)
59 64
60#if PREEMPT_ACTIVE < (1 << (HARDIRQ_SHIFT + HARDIRQ_BITS)) 65#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS))
61#error PREEMPT_ACTIVE is too low! 66#error PREEMPT_ACTIVE is too low!
62#endif 67#endif
63 68
64#define hardirq_count() (preempt_count() & HARDIRQ_MASK) 69#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
65#define softirq_count() (preempt_count() & SOFTIRQ_MASK) 70#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
66#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK)) 71#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
72 | NMI_MASK))
67 73
68/* 74/*
69 * Are we doing bottom half or hardware interrupt processing? 75 * Are we doing bottom half or hardware interrupt processing?
@@ -73,6 +79,11 @@
73#define in_softirq() (softirq_count()) 79#define in_softirq() (softirq_count())
74#define in_interrupt() (irq_count()) 80#define in_interrupt() (irq_count())
75 81
82/*
83 * Are we in NMI context?
84 */
85#define in_nmi() (preempt_count() & NMI_MASK)
86
76#if defined(CONFIG_PREEMPT) 87#if defined(CONFIG_PREEMPT)
77# define PREEMPT_INATOMIC_BASE kernel_locked() 88# define PREEMPT_INATOMIC_BASE kernel_locked()
78# define PREEMPT_CHECK_OFFSET 1 89# define PREEMPT_CHECK_OFFSET 1
@@ -164,20 +175,24 @@ extern void irq_enter(void);
164 */ 175 */
165extern void irq_exit(void); 176extern void irq_exit(void);
166 177
167#define nmi_enter() \ 178#define nmi_enter() \
168 do { \ 179 do { \
169 ftrace_nmi_enter(); \ 180 ftrace_nmi_enter(); \
170 lockdep_off(); \ 181 BUG_ON(in_nmi()); \
171 rcu_nmi_enter(); \ 182 add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \
172 __irq_enter(); \ 183 lockdep_off(); \
184 rcu_nmi_enter(); \
185 trace_hardirq_enter(); \
173 } while (0) 186 } while (0)
174 187
175#define nmi_exit() \ 188#define nmi_exit() \
176 do { \ 189 do { \
177 __irq_exit(); \ 190 trace_hardirq_exit(); \
178 rcu_nmi_exit(); \ 191 rcu_nmi_exit(); \
179 lockdep_on(); \ 192 lockdep_on(); \
180 ftrace_nmi_exit(); \ 193 BUG_ON(!in_nmi()); \
194 sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \
195 ftrace_nmi_exit(); \
181 } while (0) 196 } while (0)
182 197
183#endif /* LINUX_HARDIRQ_H */ 198#endif /* LINUX_HARDIRQ_H */
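The new preempt_count() layout described in the comment above works out as follows; the numbers follow directly from the added #defines and show why nmi_enter() bumps both the NMI and hardirq parts:

/*
 *   PREEMPT_SHIFT =  0    PREEMPT_MASK = 0x000000ff
 *   SOFTIRQ_SHIFT =  8    SOFTIRQ_MASK = 0x0000ff00
 *   HARDIRQ_SHIFT = 16    HARDIRQ_MASK = 0x03ff0000
 *   NMI_SHIFT     = 26    NMI_MASK     = 0x04000000
 *
 * nmi_enter() adds NMI_OFFSET + HARDIRQ_OFFSET = 0x04010000 to
 * preempt_count(), so inside an NMI handler both in_nmi() and
 * in_irq()/in_interrupt() report true; nmi_exit() subtracts the
 * same amount.
 */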
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 7fa371898e3e..08bf5da86676 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -368,6 +368,64 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
368#endif 368#endif
369 369
370/* 370/*
371 * General tracing related utility functions - trace_printk(),
372 * tracing_start()/tracing_stop:
373 */
374#ifdef CONFIG_TRACING
375extern void tracing_start(void);
376extern void tracing_stop(void);
377extern void ftrace_off_permanent(void);
378
379extern void
380ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
381
382/**
383 * trace_printk - printf formatting in the ftrace buffer
384 * @fmt: the printf format for printing
385 *
386 * Note: __trace_printk is an internal function for trace_printk and
387 * the @ip is passed in via the trace_printk macro.
388 *
389 * This function allows a kernel developer to debug fast path sections
390 * that printk is not appropriate for. By scattering in various
391 * printk like tracing in the code, a developer can quickly see
392 * where problems are occurring.
393 *
394 * This is intended as a debugging tool for the developer only.
395 * Please refrain from leaving trace_printks scattered around in
396 * your code.
397 */
398# define trace_printk(fmt...) __trace_printk(_THIS_IP_, fmt)
399extern int
400__trace_printk(unsigned long ip, const char *fmt, ...)
401 __attribute__ ((format (printf, 2, 3)));
402# define ftrace_vprintk(fmt, ap) __trace_printk(_THIS_IP_, fmt, ap)
403extern int
404__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
405extern void ftrace_dump(void);
406#else
407static inline void
408ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
409static inline int
410trace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2)));
411
412static inline void tracing_start(void) { }
413static inline void tracing_stop(void) { }
414static inline void ftrace_off_permanent(void) { }
415static inline int
416trace_printk(const char *fmt, ...)
417{
418 return 0;
419}
420static inline int
421ftrace_vprintk(const char *fmt, va_list ap)
422{
423 return 0;
424}
425static inline void ftrace_dump(void) { }
426#endif
427
428/*
371 * Display an IP address in readable format. 429 * Display an IP address in readable format.
372 */ 430 */
373 431
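A minimal usage sketch of the trace_printk() helper documented above: the message goes to the ftrace ring buffer rather than the console, which makes it cheap enough for interrupt fast paths. The handler below is illustrative only:

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	/* lands in the trace buffer, visible via the tracing debugfs files */
	trace_printk("irq %d fired, dev=%p\n", irq, dev_id);
	return IRQ_HANDLED;
}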
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 23bf02fb124f..5a58ea3e91e9 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -20,43 +20,10 @@ struct lockdep_map;
20#include <linux/stacktrace.h> 20#include <linux/stacktrace.h>
21 21
22/* 22/*
23 * Lock-class usage-state bits: 23 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
24 * the total number of states... :-(
24 */ 25 */
25enum lock_usage_bit 26#define XXX_LOCK_USAGE_STATES (1+3*4)
26{
27 LOCK_USED = 0,
28 LOCK_USED_IN_HARDIRQ,
29 LOCK_USED_IN_SOFTIRQ,
30 LOCK_ENABLED_SOFTIRQS,
31 LOCK_ENABLED_HARDIRQS,
32 LOCK_USED_IN_HARDIRQ_READ,
33 LOCK_USED_IN_SOFTIRQ_READ,
34 LOCK_ENABLED_SOFTIRQS_READ,
35 LOCK_ENABLED_HARDIRQS_READ,
36 LOCK_USAGE_STATES
37};
38
39/*
40 * Usage-state bitmasks:
41 */
42#define LOCKF_USED (1 << LOCK_USED)
43#define LOCKF_USED_IN_HARDIRQ (1 << LOCK_USED_IN_HARDIRQ)
44#define LOCKF_USED_IN_SOFTIRQ (1 << LOCK_USED_IN_SOFTIRQ)
45#define LOCKF_ENABLED_HARDIRQS (1 << LOCK_ENABLED_HARDIRQS)
46#define LOCKF_ENABLED_SOFTIRQS (1 << LOCK_ENABLED_SOFTIRQS)
47
48#define LOCKF_ENABLED_IRQS (LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS)
49#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)
50
51#define LOCKF_USED_IN_HARDIRQ_READ (1 << LOCK_USED_IN_HARDIRQ_READ)
52#define LOCKF_USED_IN_SOFTIRQ_READ (1 << LOCK_USED_IN_SOFTIRQ_READ)
53#define LOCKF_ENABLED_HARDIRQS_READ (1 << LOCK_ENABLED_HARDIRQS_READ)
54#define LOCKF_ENABLED_SOFTIRQS_READ (1 << LOCK_ENABLED_SOFTIRQS_READ)
55
56#define LOCKF_ENABLED_IRQS_READ \
57 (LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ)
58#define LOCKF_USED_IN_IRQ_READ \
59 (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
60 27
61#define MAX_LOCKDEP_SUBCLASSES 8UL 28#define MAX_LOCKDEP_SUBCLASSES 8UL
62 29
@@ -97,7 +64,7 @@ struct lock_class {
97 * IRQ/softirq usage tracking bits: 64 * IRQ/softirq usage tracking bits:
98 */ 65 */
99 unsigned long usage_mask; 66 unsigned long usage_mask;
100 struct stack_trace usage_traces[LOCK_USAGE_STATES]; 67 struct stack_trace usage_traces[XXX_LOCK_USAGE_STATES];
101 68
102 /* 69 /*
103 * These fields represent a directed graph of lock dependencies, 70 * These fields represent a directed graph of lock dependencies,
@@ -324,7 +291,11 @@ static inline void lock_set_subclass(struct lockdep_map *lock,
324 lock_set_class(lock, lock->name, lock->key, subclass, ip); 291 lock_set_class(lock, lock->name, lock->key, subclass, ip);
325} 292}
326 293
327# define INIT_LOCKDEP .lockdep_recursion = 0, 294extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
295extern void lockdep_clear_current_reclaim_state(void);
296extern void lockdep_trace_alloc(gfp_t mask);
297
298# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
328 299
329#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0) 300#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
330 301
@@ -342,6 +313,9 @@ static inline void lockdep_on(void)
342# define lock_release(l, n, i) do { } while (0) 313# define lock_release(l, n, i) do { } while (0)
343# define lock_set_class(l, n, k, s, i) do { } while (0) 314# define lock_set_class(l, n, k, s, i) do { } while (0)
344# define lock_set_subclass(l, s, i) do { } while (0) 315# define lock_set_subclass(l, s, i) do { } while (0)
316# define lockdep_set_current_reclaim_state(g) do { } while (0)
317# define lockdep_clear_current_reclaim_state() do { } while (0)
318# define lockdep_trace_alloc(g) do { } while (0)
345# define lockdep_init() do { } while (0) 319# define lockdep_init() do { } while (0)
346# define lockdep_info() do { } while (0) 320# define lockdep_info() do { } while (0)
347# define lockdep_init_map(lock, name, key, sub) \ 321# define lockdep_init_map(lock, name, key, sub) \
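The three new reclaim hooks let lockdep model "this allocation may recurse into reclaim" explicitly. A hedged sketch of the intended calling pattern; the surrounding allocator function and the try_alloc()/try_to_free_pages_stub() helpers are illustrative, not the real page-allocator code:

static void *alloc_with_reclaim(gfp_t gfp_mask, size_t size)
{
	void *ptr;

	lockdep_trace_alloc(gfp_mask);			/* record the GFP context of this allocation */

	ptr = try_alloc(size, gfp_mask);		/* hypothetical fast path */
	if (ptr)
		return ptr;

	lockdep_set_current_reclaim_state(gfp_mask);	/* entering direct reclaim */
	try_to_free_pages_stub(gfp_mask);		/* hypothetical reclaim */
	lockdep_clear_current_reclaim_state();		/* back to normal context */

	return try_alloc(size, gfp_mask);
}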
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 7a0e5c4f8072..3069ec7e0ab8 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -50,8 +50,10 @@ struct mutex {
50 atomic_t count; 50 atomic_t count;
51 spinlock_t wait_lock; 51 spinlock_t wait_lock;
52 struct list_head wait_list; 52 struct list_head wait_list;
53#ifdef CONFIG_DEBUG_MUTEXES 53#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
54 struct thread_info *owner; 54 struct thread_info *owner;
55#endif
56#ifdef CONFIG_DEBUG_MUTEXES
55 const char *name; 57 const char *name;
56 void *magic; 58 void *magic;
57#endif 59#endif
@@ -68,7 +70,6 @@ struct mutex_waiter {
68 struct list_head list; 70 struct list_head list;
69 struct task_struct *task; 71 struct task_struct *task;
70#ifdef CONFIG_DEBUG_MUTEXES 72#ifdef CONFIG_DEBUG_MUTEXES
71 struct mutex *lock;
72 void *magic; 73 void *magic;
73#endif 74#endif
74}; 75};
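The owner field now exists on SMP (not only with mutex debugging) so the mutex slow path can spin while the current owner is still running on a CPU, using the mutex_spin_on_owner() helper declared in the sched.h hunk below. A simplified, hedged sketch of that idea, not the actual kernel/mutex.c code:

static bool try_spin_for_mutex(struct mutex *lock)
{
	for (;;) {
		struct thread_info *owner = ACCESS_ONCE(lock->owner);

		/* stop spinning once the owner is no longer on a CPU */
		if (owner && !mutex_spin_on_owner(lock, owner))
			break;

		/* count == 1 means unlocked: try to take it without sleeping */
		if (atomic_cmpxchg(&lock->count, 1, 0) == 1)
			return true;

		if (need_resched())
			break;

		cpu_relax();
	}
	return false;			/* fall back to the sleeping path */
}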
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index b3b359660082..79fcbc4b09d6 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -8,7 +8,7 @@ struct ring_buffer;
8struct ring_buffer_iter; 8struct ring_buffer_iter;
9 9
10/* 10/*
11 * Don't reference this struct directly, use functions below. 11 * Don't refer to this struct directly, use functions below.
12 */ 12 */
13struct ring_buffer_event { 13struct ring_buffer_event {
14 u32 type:2, len:3, time_delta:27; 14 u32 type:2, len:3, time_delta:27;
@@ -74,13 +74,10 @@ void ring_buffer_free(struct ring_buffer *buffer);
74 74
75int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size); 75int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size);
76 76
77struct ring_buffer_event * 77struct ring_buffer_event *ring_buffer_lock_reserve(struct ring_buffer *buffer,
78ring_buffer_lock_reserve(struct ring_buffer *buffer, 78 unsigned long length);
79 unsigned long length,
80 unsigned long *flags);
81int ring_buffer_unlock_commit(struct ring_buffer *buffer, 79int ring_buffer_unlock_commit(struct ring_buffer *buffer,
82 struct ring_buffer_event *event, 80 struct ring_buffer_event *event);
83 unsigned long flags);
84int ring_buffer_write(struct ring_buffer *buffer, 81int ring_buffer_write(struct ring_buffer *buffer,
85 unsigned long length, void *data); 82 unsigned long length, void *data);
86 83
@@ -124,14 +121,28 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
124u64 ring_buffer_time_stamp(int cpu); 121u64 ring_buffer_time_stamp(int cpu);
125void ring_buffer_normalize_time_stamp(int cpu, u64 *ts); 122void ring_buffer_normalize_time_stamp(int cpu, u64 *ts);
126 123
124size_t ring_buffer_page_len(void *page);
125
126
127/*
128 * The below functions are fine to use outside the tracing facility.
129 */
130#ifdef CONFIG_RING_BUFFER
127void tracing_on(void); 131void tracing_on(void);
128void tracing_off(void); 132void tracing_off(void);
129void tracing_off_permanent(void); 133void tracing_off_permanent(void);
134int tracing_is_on(void);
135#else
136static inline void tracing_on(void) { }
137static inline void tracing_off(void) { }
138static inline void tracing_off_permanent(void) { }
139static inline int tracing_is_on(void) { return 0; }
140#endif
130 141
131void *ring_buffer_alloc_read_page(struct ring_buffer *buffer); 142void *ring_buffer_alloc_read_page(struct ring_buffer *buffer);
132void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data); 143void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data);
133int ring_buffer_read_page(struct ring_buffer *buffer, 144int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page,
134 void **data_page, int cpu, int full); 145 size_t len, int cpu, int full);
135 146
136enum ring_buffer_flags { 147enum ring_buffer_flags {
137 RB_FL_OVERWRITE = 1 << 0, 148 RB_FL_OVERWRITE = 1 << 0,
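ring_buffer_lock_reserve()/ring_buffer_unlock_commit() no longer take an irq-flags argument, so a writer now looks like the sketch below. ring_buffer_event_data() is part of the existing ring buffer API; the my_sample payload type is an assumption for illustration:

struct my_sample {
	u32 value;
};

static void write_sample(struct ring_buffer *buffer, u32 value)
{
	struct ring_buffer_event *event;
	struct my_sample *entry;

	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
	if (!event)
		return;				/* buffer full or tracing off */

	entry = ring_buffer_event_data(event);
	entry->value = value;

	ring_buffer_unlock_commit(buffer, event);
}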
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a7c7698583bb..5b9424eaa58f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -137,6 +137,8 @@ extern unsigned long nr_uninterruptible(void);
137extern unsigned long nr_active(void); 137extern unsigned long nr_active(void);
138extern unsigned long nr_iowait(void); 138extern unsigned long nr_iowait(void);
139 139
140extern unsigned long get_parent_ip(unsigned long addr);
141
140struct seq_file; 142struct seq_file;
141struct cfs_rq; 143struct cfs_rq;
142struct task_group; 144struct task_group;
@@ -331,7 +333,9 @@ extern signed long schedule_timeout(signed long timeout);
331extern signed long schedule_timeout_interruptible(signed long timeout); 333extern signed long schedule_timeout_interruptible(signed long timeout);
332extern signed long schedule_timeout_killable(signed long timeout); 334extern signed long schedule_timeout_killable(signed long timeout);
333extern signed long schedule_timeout_uninterruptible(signed long timeout); 335extern signed long schedule_timeout_uninterruptible(signed long timeout);
336asmlinkage void __schedule(void);
334asmlinkage void schedule(void); 337asmlinkage void schedule(void);
338extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
335 339
336struct nsproxy; 340struct nsproxy;
337struct user_namespace; 341struct user_namespace;
@@ -1327,6 +1331,7 @@ struct task_struct {
1327 int lockdep_depth; 1331 int lockdep_depth;
1328 unsigned int lockdep_recursion; 1332 unsigned int lockdep_recursion;
1329 struct held_lock held_locks[MAX_LOCK_DEPTH]; 1333 struct held_lock held_locks[MAX_LOCK_DEPTH];
1334 gfp_t lockdep_reclaim_gfp;
1330#endif 1335#endif
1331 1336
1332/* journalling filesystem info */ 1337/* journalling filesystem info */
@@ -1669,6 +1674,16 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
1669 return set_cpus_allowed_ptr(p, &new_mask); 1674 return set_cpus_allowed_ptr(p, &new_mask);
1670} 1675}
1671 1676
1677/*
1678 * Architectures can set this to 1 if they have specified
1679 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
1680 * but then during bootup it turns out that sched_clock()
1681 * is reliable after all:
1682 */
1683#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
1684extern int sched_clock_stable;
1685#endif
1686
1672extern unsigned long long sched_clock(void); 1687extern unsigned long long sched_clock(void);
1673 1688
1674extern void sched_clock_init(void); 1689extern void sched_clock_init(void);
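A short sketch of how an architecture covered by the comment above might flip sched_clock_stable once it has validated its clock during boot; the check function is hypothetical:

void __init arch_clock_check(void)
{
	/* hypothetical test that the TSC is constant and synchronized */
	if (arch_clock_is_reliable())
		sched_clock_stable = 1;
}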
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 6ca6a7b66d75..f4523651fa42 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -14,6 +14,7 @@
14#include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */ 14#include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */
15#include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */ 15#include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */
16#include <linux/compiler.h> 16#include <linux/compiler.h>
17#include <trace/kmemtrace.h>
17 18
18/* Size description struct for general caches. */ 19/* Size description struct for general caches. */
19struct cache_sizes { 20struct cache_sizes {
@@ -28,8 +29,26 @@ extern struct cache_sizes malloc_sizes[];
28void *kmem_cache_alloc(struct kmem_cache *, gfp_t); 29void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
29void *__kmalloc(size_t size, gfp_t flags); 30void *__kmalloc(size_t size, gfp_t flags);
30 31
31static inline void *kmalloc(size_t size, gfp_t flags) 32#ifdef CONFIG_KMEMTRACE
33extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
34extern size_t slab_buffer_size(struct kmem_cache *cachep);
35#else
36static __always_inline void *
37kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
32{ 38{
39 return kmem_cache_alloc(cachep, flags);
40}
41static inline size_t slab_buffer_size(struct kmem_cache *cachep)
42{
43 return 0;
44}
45#endif
46
47static __always_inline void *kmalloc(size_t size, gfp_t flags)
48{
49 struct kmem_cache *cachep;
50 void *ret;
51
33 if (__builtin_constant_p(size)) { 52 if (__builtin_constant_p(size)) {
34 int i = 0; 53 int i = 0;
35 54
@@ -47,10 +66,17 @@ static inline void *kmalloc(size_t size, gfp_t flags)
47found: 66found:
48#ifdef CONFIG_ZONE_DMA 67#ifdef CONFIG_ZONE_DMA
49 if (flags & GFP_DMA) 68 if (flags & GFP_DMA)
50 return kmem_cache_alloc(malloc_sizes[i].cs_dmacachep, 69 cachep = malloc_sizes[i].cs_dmacachep;
51 flags); 70 else
52#endif 71#endif
53 return kmem_cache_alloc(malloc_sizes[i].cs_cachep, flags); 72 cachep = malloc_sizes[i].cs_cachep;
73
74 ret = kmem_cache_alloc_notrace(cachep, flags);
75
76 kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
77 size, slab_buffer_size(cachep), flags);
78
79 return ret;
54 } 80 }
55 return __kmalloc(size, flags); 81 return __kmalloc(size, flags);
56} 82}
@@ -59,8 +85,25 @@ found:
59extern void *__kmalloc_node(size_t size, gfp_t flags, int node); 85extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
60extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); 86extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
61 87
62static inline void *kmalloc_node(size_t size, gfp_t flags, int node) 88#ifdef CONFIG_KMEMTRACE
89extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
90 gfp_t flags,
91 int nodeid);
92#else
93static __always_inline void *
94kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
95 gfp_t flags,
96 int nodeid)
97{
98 return kmem_cache_alloc_node(cachep, flags, nodeid);
99}
100#endif
101
102static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
63{ 103{
104 struct kmem_cache *cachep;
105 void *ret;
106
64 if (__builtin_constant_p(size)) { 107 if (__builtin_constant_p(size)) {
65 int i = 0; 108 int i = 0;
66 109
@@ -78,11 +121,18 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
78found: 121found:
79#ifdef CONFIG_ZONE_DMA 122#ifdef CONFIG_ZONE_DMA
80 if (flags & GFP_DMA) 123 if (flags & GFP_DMA)
81 return kmem_cache_alloc_node(malloc_sizes[i].cs_dmacachep, 124 cachep = malloc_sizes[i].cs_dmacachep;
82 flags, node); 125 else
83#endif 126#endif
84 return kmem_cache_alloc_node(malloc_sizes[i].cs_cachep, 127 cachep = malloc_sizes[i].cs_cachep;
85 flags, node); 128
129 ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
130
131 kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_,
132 ret, size, slab_buffer_size(cachep),
133 flags, node);
134
135 return ret;
86 } 136 }
87 return __kmalloc_node(size, flags, node); 137 return __kmalloc_node(size, flags, node);
88} 138}
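The inline kmalloc() fast path above now calls kmem_cache_alloc_notrace() so that exactly one kmemtrace event is emitted per allocation, from the inline itself. A hedged sketch of how mm/slab.c is expected to provide that variant when CONFIG_KMEMTRACE is set; __cache_alloc() is the allocator's internal helper and is assumed here, not shown in this diff:

#ifdef CONFIG_KMEMTRACE
void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
{
	/* same allocation as kmem_cache_alloc(), minus the trace hook */
	return __cache_alloc(cachep, flags, __builtin_return_address(0));
}
EXPORT_SYMBOL(kmem_cache_alloc_notrace);
#endif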
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
index 59a3fa476ab9..0ec00b39d006 100644
--- a/include/linux/slob_def.h
+++ b/include/linux/slob_def.h
@@ -3,14 +3,15 @@
3 3
4void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); 4void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
5 5
6static inline void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) 6static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
7 gfp_t flags)
7{ 8{
8 return kmem_cache_alloc_node(cachep, flags, -1); 9 return kmem_cache_alloc_node(cachep, flags, -1);
9} 10}
10 11
11void *__kmalloc_node(size_t size, gfp_t flags, int node); 12void *__kmalloc_node(size_t size, gfp_t flags, int node);
12 13
13static inline void *kmalloc_node(size_t size, gfp_t flags, int node) 14static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
14{ 15{
15 return __kmalloc_node(size, flags, node); 16 return __kmalloc_node(size, flags, node);
16} 17}
@@ -23,12 +24,12 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
23 * kmalloc is the normal method of allocating memory 24 * kmalloc is the normal method of allocating memory
24 * in the kernel. 25 * in the kernel.
25 */ 26 */
26static inline void *kmalloc(size_t size, gfp_t flags) 27static __always_inline void *kmalloc(size_t size, gfp_t flags)
27{ 28{
28 return __kmalloc_node(size, flags, -1); 29 return __kmalloc_node(size, flags, -1);
29} 30}
30 31
31static inline void *__kmalloc(size_t size, gfp_t flags) 32static __always_inline void *__kmalloc(size_t size, gfp_t flags)
32{ 33{
33 return kmalloc(size, flags); 34 return kmalloc(size, flags);
34} 35}
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 2f5c16b1aacd..9e3a575b2c30 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -10,6 +10,7 @@
10#include <linux/gfp.h> 10#include <linux/gfp.h>
11#include <linux/workqueue.h> 11#include <linux/workqueue.h>
12#include <linux/kobject.h> 12#include <linux/kobject.h>
13#include <trace/kmemtrace.h>
13 14
14enum stat_item { 15enum stat_item {
15 ALLOC_FASTPATH, /* Allocation from cpu slab */ 16 ALLOC_FASTPATH, /* Allocation from cpu slab */
@@ -121,10 +122,23 @@ struct kmem_cache {
121#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE) 122#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
122 123
123/* 124/*
125 * Maximum kmalloc object size handled by SLUB. Larger object allocations
126 * are passed through to the page allocator. The page allocator "fastpath"
127 * is relatively slow so we need this value sufficiently high so that
128 * performance critical objects are allocated through the SLUB fastpath.
129 *
130 * This should be dropped to PAGE_SIZE / 2 once the page allocator
131 * "fastpath" becomes competitive with the slab allocator fastpaths.
132 */
133#define SLUB_MAX_SIZE (PAGE_SIZE)
134
135#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 1)
136
137/*
124 * We keep the general caches in an array of slab caches that are used for 138 * We keep the general caches in an array of slab caches that are used for
125 * 2^x bytes of allocations. 139 * 2^x bytes of allocations.
126 */ 140 */
127extern struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1]; 141extern struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT];
128 142
129/* 143/*
130 * Sorry that the following has to be that ugly but some versions of GCC 144 * Sorry that the following has to be that ugly but some versions of GCC
@@ -204,15 +218,33 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
204void *kmem_cache_alloc(struct kmem_cache *, gfp_t); 218void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
205void *__kmalloc(size_t size, gfp_t flags); 219void *__kmalloc(size_t size, gfp_t flags);
206 220
221#ifdef CONFIG_KMEMTRACE
222extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
223#else
224static __always_inline void *
225kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
226{
227 return kmem_cache_alloc(s, gfpflags);
228}
229#endif
230
207static __always_inline void *kmalloc_large(size_t size, gfp_t flags) 231static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
208{ 232{
209 return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size)); 233 unsigned int order = get_order(size);
234 void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
235
236 kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
237 size, PAGE_SIZE << order, flags);
238
239 return ret;
210} 240}
211 241
212static __always_inline void *kmalloc(size_t size, gfp_t flags) 242static __always_inline void *kmalloc(size_t size, gfp_t flags)
213{ 243{
244 void *ret;
245
214 if (__builtin_constant_p(size)) { 246 if (__builtin_constant_p(size)) {
215 if (size > PAGE_SIZE) 247 if (size > SLUB_MAX_SIZE)
216 return kmalloc_large(size, flags); 248 return kmalloc_large(size, flags);
217 249
218 if (!(flags & SLUB_DMA)) { 250 if (!(flags & SLUB_DMA)) {
@@ -221,7 +253,13 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
221 if (!s) 253 if (!s)
222 return ZERO_SIZE_PTR; 254 return ZERO_SIZE_PTR;
223 255
224 return kmem_cache_alloc(s, flags); 256 ret = kmem_cache_alloc_notrace(s, flags);
257
258 kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
259 _THIS_IP_, ret,
260 size, s->size, flags);
261
262 return ret;
225 } 263 }
226 } 264 }
227 return __kmalloc(size, flags); 265 return __kmalloc(size, flags);
@@ -231,16 +269,38 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
231void *__kmalloc_node(size_t size, gfp_t flags, int node); 269void *__kmalloc_node(size_t size, gfp_t flags, int node);
232void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); 270void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
233 271
272#ifdef CONFIG_KMEMTRACE
273extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
274 gfp_t gfpflags,
275 int node);
276#else
277static __always_inline void *
278kmem_cache_alloc_node_notrace(struct kmem_cache *s,
279 gfp_t gfpflags,
280 int node)
281{
282 return kmem_cache_alloc_node(s, gfpflags, node);
283}
284#endif
285
234static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) 286static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
235{ 287{
288 void *ret;
289
236 if (__builtin_constant_p(size) && 290 if (__builtin_constant_p(size) &&
237 size <= PAGE_SIZE && !(flags & SLUB_DMA)) { 291 size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
238 struct kmem_cache *s = kmalloc_slab(size); 292 struct kmem_cache *s = kmalloc_slab(size);
239 293
240 if (!s) 294 if (!s)
241 return ZERO_SIZE_PTR; 295 return ZERO_SIZE_PTR;
242 296
243 return kmem_cache_alloc_node(s, flags, node); 297 ret = kmem_cache_alloc_node_notrace(s, flags, node);
298
299 kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
300 _THIS_IP_, ret,
301 size, s->size, flags, node);
302
303 return ret;
244 } 304 }
245 return __kmalloc_node(size, flags, node); 305 return __kmalloc_node(size, flags, node);
246} 306}
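Worked example of the new SLUB_MAX_SIZE / SLUB_PAGE_SHIFT cutoff introduced earlier in this file, assuming 4 KiB pages:

/*
 *   SLUB_MAX_SIZE   = PAGE_SIZE      = 4096
 *   SLUB_PAGE_SHIFT = PAGE_SHIFT + 1 = 13
 *
 * With 4 KiB pages a constant-size kmalloc(4096) is still served from
 * kmalloc_caches[] (13 caches, covering sizes up to 2^12), while
 * kmalloc(4097) and anything larger falls through to kmalloc_large()
 * and the page allocator.  Lowering SLUB_MAX_SIZE to PAGE_SIZE / 2, as
 * the comment above suggests, would send 4 KiB requests to the page
 * allocator as well.
 */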
diff --git a/include/linux/timer.h b/include/linux/timer.h
index daf9685b861c..51774eb87cc6 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -5,6 +5,7 @@
5#include <linux/ktime.h> 5#include <linux/ktime.h>
6#include <linux/stddef.h> 6#include <linux/stddef.h>
7#include <linux/debugobjects.h> 7#include <linux/debugobjects.h>
8#include <linux/stringify.h>
8 9
9struct tvec_base; 10struct tvec_base;
10 11
@@ -21,52 +22,126 @@ struct timer_list {
21 char start_comm[16]; 22 char start_comm[16];
22 int start_pid; 23 int start_pid;
23#endif 24#endif
25#ifdef CONFIG_LOCKDEP
26 struct lockdep_map lockdep_map;
27#endif
24}; 28};
25 29
26extern struct tvec_base boot_tvec_bases; 30extern struct tvec_base boot_tvec_bases;
27 31
32#ifdef CONFIG_LOCKDEP
33/*
34 * NB: because we have to copy the lockdep_map, setting the lockdep_map key
35 * (second argument) here is required, otherwise it could be initialised to
36 * the copy of the lockdep_map later! We use the pointer to and the string
37 * "<file>:<line>" as the key resp. the name of the lockdep_map.
38 */
39#define __TIMER_LOCKDEP_MAP_INITIALIZER(_kn) \
40 .lockdep_map = STATIC_LOCKDEP_MAP_INIT(_kn, &_kn),
41#else
42#define __TIMER_LOCKDEP_MAP_INITIALIZER(_kn)
43#endif
44
28#define TIMER_INITIALIZER(_function, _expires, _data) { \ 45#define TIMER_INITIALIZER(_function, _expires, _data) { \
29 .entry = { .prev = TIMER_ENTRY_STATIC }, \ 46 .entry = { .prev = TIMER_ENTRY_STATIC }, \
30 .function = (_function), \ 47 .function = (_function), \
31 .expires = (_expires), \ 48 .expires = (_expires), \
32 .data = (_data), \ 49 .data = (_data), \
33 .base = &boot_tvec_bases, \ 50 .base = &boot_tvec_bases, \
51 __TIMER_LOCKDEP_MAP_INITIALIZER( \
52 __FILE__ ":" __stringify(__LINE__)) \
34 } 53 }
35 54
36#define DEFINE_TIMER(_name, _function, _expires, _data) \ 55#define DEFINE_TIMER(_name, _function, _expires, _data) \
37 struct timer_list _name = \ 56 struct timer_list _name = \
38 TIMER_INITIALIZER(_function, _expires, _data) 57 TIMER_INITIALIZER(_function, _expires, _data)
39 58
40void init_timer(struct timer_list *timer); 59void init_timer_key(struct timer_list *timer,
41void init_timer_deferrable(struct timer_list *timer); 60 const char *name,
61 struct lock_class_key *key);
62void init_timer_deferrable_key(struct timer_list *timer,
63 const char *name,
64 struct lock_class_key *key);
65
66#ifdef CONFIG_LOCKDEP
67#define init_timer(timer) \
68 do { \
69 static struct lock_class_key __key; \
70 init_timer_key((timer), #timer, &__key); \
71 } while (0)
72
73#define init_timer_deferrable(timer) \
74 do { \
75 static struct lock_class_key __key; \
76 init_timer_deferrable_key((timer), #timer, &__key); \
77 } while (0)
78
79#define init_timer_on_stack(timer) \
80 do { \
81 static struct lock_class_key __key; \
82 init_timer_on_stack_key((timer), #timer, &__key); \
83 } while (0)
84
85#define setup_timer(timer, fn, data) \
86 do { \
87 static struct lock_class_key __key; \
88 setup_timer_key((timer), #timer, &__key, (fn), (data));\
89 } while (0)
90
91#define setup_timer_on_stack(timer, fn, data) \
92 do { \
93 static struct lock_class_key __key; \
94 setup_timer_on_stack_key((timer), #timer, &__key, \
95 (fn), (data)); \
96 } while (0)
97#else
98#define init_timer(timer)\
99 init_timer_key((timer), NULL, NULL)
100#define init_timer_deferrable(timer)\
101 init_timer_deferrable_key((timer), NULL, NULL)
102#define init_timer_on_stack(timer)\
103 init_timer_on_stack_key((timer), NULL, NULL)
104#define setup_timer(timer, fn, data)\
105 setup_timer_key((timer), NULL, NULL, (fn), (data))
106#define setup_timer_on_stack(timer, fn, data)\
107 setup_timer_on_stack_key((timer), NULL, NULL, (fn), (data))
108#endif
42 109
43#ifdef CONFIG_DEBUG_OBJECTS_TIMERS 110#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
44extern void init_timer_on_stack(struct timer_list *timer); 111extern void init_timer_on_stack_key(struct timer_list *timer,
112 const char *name,
113 struct lock_class_key *key);
45extern void destroy_timer_on_stack(struct timer_list *timer); 114extern void destroy_timer_on_stack(struct timer_list *timer);
46#else 115#else
47static inline void destroy_timer_on_stack(struct timer_list *timer) { } 116static inline void destroy_timer_on_stack(struct timer_list *timer) { }
48static inline void init_timer_on_stack(struct timer_list *timer) 117static inline void init_timer_on_stack_key(struct timer_list *timer,
118 const char *name,
119 struct lock_class_key *key)
49{ 120{
50 init_timer(timer); 121 init_timer_key(timer, name, key);
51} 122}
52#endif 123#endif
53 124
54static inline void setup_timer(struct timer_list * timer, 125static inline void setup_timer_key(struct timer_list * timer,
126 const char *name,
127 struct lock_class_key *key,
55 void (*function)(unsigned long), 128 void (*function)(unsigned long),
56 unsigned long data) 129 unsigned long data)
57{ 130{
58 timer->function = function; 131 timer->function = function;
59 timer->data = data; 132 timer->data = data;
60 init_timer(timer); 133 init_timer_key(timer, name, key);
61} 134}
62 135
63static inline void setup_timer_on_stack(struct timer_list *timer, 136static inline void setup_timer_on_stack_key(struct timer_list *timer,
137 const char *name,
138 struct lock_class_key *key,
64 void (*function)(unsigned long), 139 void (*function)(unsigned long),
65 unsigned long data) 140 unsigned long data)
66{ 141{
67 timer->function = function; 142 timer->function = function;
68 timer->data = data; 143 timer->data = data;
69 init_timer_on_stack(timer); 144 init_timer_on_stack_key(timer, name, key);
70} 145}
71 146
72/** 147/**
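With the wrappers above, every init_timer()/setup_timer() call site gets its own static lock_class_key, so lockdep can tell apart the callbacks of otherwise identical timers. A minimal usage sketch; the timer and callback are illustrative:

static struct timer_list my_timer;

static void my_timeout(unsigned long data)
{
	pr_info("timer expired, data=%lu\n", data);
}

static void start_my_timer(void)
{
	/* under lockdep this expands to setup_timer_key() with a per-callsite key */
	setup_timer(&my_timer, my_timeout, 0);
	mod_timer(&my_timer, jiffies + HZ);
}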
diff --git a/include/linux/trace_clock.h b/include/linux/trace_clock.h
new file mode 100644
index 000000000000..7a8130384087
--- /dev/null
+++ b/include/linux/trace_clock.h
@@ -0,0 +1,19 @@
1#ifndef _LINUX_TRACE_CLOCK_H
2#define _LINUX_TRACE_CLOCK_H
3
4/*
5 * 3 trace clock variants, with differing scalability/precision
6 * tradeoffs:
7 *
8 * - local: CPU-local trace clock
9 * - medium: scalable global clock with some jitter
10 * - global: globally monotonic, serialized clock
11 */
12#include <linux/compiler.h>
13#include <linux/types.h>
14
15extern u64 notrace trace_clock_local(void);
16extern u64 notrace trace_clock(void);
17extern u64 notrace trace_clock_global(void);
18
19#endif /* _LINUX_TRACE_CLOCK_H */
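A small sketch of how a tracer picks between the three clock variants declared above, trading ordering guarantees for cost; stamp_event() is illustrative:

static u64 stamp_event(bool need_global_order)
{
	/* local: cheapest, per-CPU; global: serialized but fully ordered */
	return need_global_order ? trace_clock_global() : trace_clock_local();
}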
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 757005458366..152b2f03fb86 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -153,4 +153,11 @@ static inline void tracepoint_synchronize_unregister(void)
153 synchronize_sched(); 153 synchronize_sched();
154} 154}
155 155
156#define PARAMS(args...) args
157#define TRACE_FORMAT(name, proto, args, fmt) \
158 DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
159
160#define TRACE_EVENT_FORMAT(name, proto, args, fmt, struct, tpfmt) \
161 TRACE_FORMAT(name, PARAMS(proto), PARAMS(args), PARAMS(fmt))
162
156#endif 163#endif
diff --git a/include/trace/irq.h b/include/trace/irq.h
new file mode 100644
index 000000000000..ff5d4495dc37
--- /dev/null
+++ b/include/trace/irq.h
@@ -0,0 +1,9 @@
1#ifndef _TRACE_IRQ_H
2#define _TRACE_IRQ_H
3
4#include <linux/interrupt.h>
5#include <linux/tracepoint.h>
6
7#include <trace/irq_event_types.h>
8
9#endif
diff --git a/include/trace/irq_event_types.h b/include/trace/irq_event_types.h
new file mode 100644
index 000000000000..65850bc5ea06
--- /dev/null
+++ b/include/trace/irq_event_types.h
@@ -0,0 +1,33 @@
1
2/* use <trace/irq.h> instead */
3#ifndef TRACE_FORMAT
4# error Do not include this file directly.
5# error Unless you know what you are doing.
6#endif
7
8#undef TRACE_SYSTEM
9#define TRACE_SYSTEM irq
10
11TRACE_EVENT_FORMAT(irq_handler_entry,
12 TPPROTO(int irq, struct irqaction *action),
13 TPARGS(irq, action),
14 TPFMT("irq=%d handler=%s", irq, action->name),
15 TRACE_STRUCT(
16 TRACE_FIELD(int, irq, irq)
17 ),
18 TPRAWFMT("irq %d")
19 );
20
21TRACE_EVENT_FORMAT(irq_handler_exit,
22 TPPROTO(int irq, struct irqaction *action, int ret),
23 TPARGS(irq, action, ret),
24 TPFMT("irq=%d handler=%s return=%s",
25 irq, action->name, ret ? "handled" : "unhandled"),
26 TRACE_STRUCT(
27 TRACE_FIELD(int, irq, irq)
28 TRACE_FIELD(int, ret, ret)
29 ),
30 TPRAWFMT("irq %d ret %d")
31 );
32
33#undef TRACE_SYSTEM
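A hedged sketch of where the two irq tracepoints above are meant to fire, modelled on the generic IRQ handler loop; run_irq_action() is illustrative, not the actual kernel/irq code:

static irqreturn_t run_irq_action(int irq, struct irqaction *action)
{
	irqreturn_t ret;

	trace_irq_handler_entry(irq, action);
	ret = action->handler(irq, action->dev_id);
	trace_irq_handler_exit(irq, action, ret);

	return ret;
}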
diff --git a/include/trace/kmemtrace.h b/include/trace/kmemtrace.h
new file mode 100644
index 000000000000..ad8b7857855a
--- /dev/null
+++ b/include/trace/kmemtrace.h
@@ -0,0 +1,75 @@
1/*
2 * Copyright (C) 2008 Eduard - Gabriel Munteanu
3 *
4 * This file is released under GPL version 2.
5 */
6
7#ifndef _LINUX_KMEMTRACE_H
8#define _LINUX_KMEMTRACE_H
9
10#ifdef __KERNEL__
11
12#include <linux/types.h>
13#include <linux/marker.h>
14
15enum kmemtrace_type_id {
16 KMEMTRACE_TYPE_KMALLOC = 0, /* kmalloc() or kfree(). */
17 KMEMTRACE_TYPE_CACHE, /* kmem_cache_*(). */
18 KMEMTRACE_TYPE_PAGES, /* __get_free_pages() and friends. */
19};
20
21#ifdef CONFIG_KMEMTRACE
22
23extern void kmemtrace_init(void);
24
25extern void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
26 unsigned long call_site,
27 const void *ptr,
28 size_t bytes_req,
29 size_t bytes_alloc,
30 gfp_t gfp_flags,
31 int node);
32
33extern void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
34 unsigned long call_site,
35 const void *ptr);
36
37#else /* CONFIG_KMEMTRACE */
38
39static inline void kmemtrace_init(void)
40{
41}
42
43static inline void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
44 unsigned long call_site,
45 const void *ptr,
46 size_t bytes_req,
47 size_t bytes_alloc,
48 gfp_t gfp_flags,
49 int node)
50{
51}
52
53static inline void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
54 unsigned long call_site,
55 const void *ptr)
56{
57}
58
59#endif /* CONFIG_KMEMTRACE */
60
61static inline void kmemtrace_mark_alloc(enum kmemtrace_type_id type_id,
62 unsigned long call_site,
63 const void *ptr,
64 size_t bytes_req,
65 size_t bytes_alloc,
66 gfp_t gfp_flags)
67{
68 kmemtrace_mark_alloc_node(type_id, call_site, ptr,
69 bytes_req, bytes_alloc, gfp_flags, -1);
70}
71
72#endif /* __KERNEL__ */
73
74#endif /* _LINUX_KMEMTRACE_H */
75
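kmemtrace_mark_alloc()/kmemtrace_mark_alloc_node() above are paired with kmemtrace_mark_free() on the release side. A hedged sketch of a free path instrumented that way; my_cache_free() and its internals are illustrative:

static void my_cache_free(struct kmem_cache *cachep, void *objp)
{
	/* _RET_IP_ records the caller as the free site */
	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, objp);

	__my_cache_free(cachep, objp);		/* hypothetical real free */
}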
diff --git a/include/trace/lockdep.h b/include/trace/lockdep.h
new file mode 100644
index 000000000000..5ca67df87f2a
--- /dev/null
+++ b/include/trace/lockdep.h
@@ -0,0 +1,9 @@
1#ifndef _TRACE_LOCKDEP_H
2#define _TRACE_LOCKDEP_H
3
4#include <linux/lockdep.h>
5#include <linux/tracepoint.h>
6
7#include <trace/lockdep_event_types.h>
8
9#endif
diff --git a/include/trace/lockdep_event_types.h b/include/trace/lockdep_event_types.h
new file mode 100644
index 000000000000..f713d74a82b4
--- /dev/null
+++ b/include/trace/lockdep_event_types.h
@@ -0,0 +1,44 @@
1
2#ifndef TRACE_EVENT_FORMAT
3# error Do not include this file directly.
4# error Unless you know what you are doing.
5#endif
6
7#undef TRACE_SYSTEM
8#define TRACE_SYSTEM lock
9
10#ifdef CONFIG_LOCKDEP
11
12TRACE_FORMAT(lock_acquire,
13 TPPROTO(struct lockdep_map *lock, unsigned int subclass,
14 int trylock, int read, int check,
15 struct lockdep_map *next_lock, unsigned long ip),
16 TPARGS(lock, subclass, trylock, read, check, next_lock, ip),
17 TPFMT("%s%s%s", trylock ? "try " : "",
18 read ? "read " : "", lock->name)
19 );
20
21TRACE_FORMAT(lock_release,
22 TPPROTO(struct lockdep_map *lock, int nested, unsigned long ip),
23 TPARGS(lock, nested, ip),
24 TPFMT("%s", lock->name)
25 );
26
27#ifdef CONFIG_LOCK_STAT
28
29TRACE_FORMAT(lock_contended,
30 TPPROTO(struct lockdep_map *lock, unsigned long ip),
31 TPARGS(lock, ip),
32 TPFMT("%s", lock->name)
33 );
34
35TRACE_FORMAT(lock_acquired,
36 TPPROTO(struct lockdep_map *lock, unsigned long ip),
37 TPARGS(lock, ip),
38 TPFMT("%s", lock->name)
39 );
40
41#endif
42#endif
43
44#undef TRACE_SYSTEM
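
Unlike TRACE_EVENT_FORMAT, TRACE_FORMAT carries no TRACE_STRUCT/TPRAWFMT pair, so these lock events define only a tracepoint plus a textual format. Assuming they end up as ordinary tracepoints with the usual generated register_trace_* helpers (an assumption about the surrounding macro machinery, not shown in this patch), a probe sketch for lock_acquire might look like:

	#include <linux/lockdep.h>
	#include <trace/lockdep.h>

	/* Illustrative probe matching the lock_acquire prototype above. */
	static void probe_lock_acquire(struct lockdep_map *lock, unsigned int subclass,
				       int trylock, int read, int check,
				       struct lockdep_map *next_lock, unsigned long ip)
	{
		/* e.g. count acquisitions per lock->name */
	}

	static int lock_probe_init(void)
	{
		/* register_trace_lock_acquire() assumed to be generated for this event */
		return register_trace_lock_acquire(probe_lock_acquire);
	}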
diff --git a/include/trace/power.h b/include/trace/power.h
new file mode 100644
index 000000000000..2c733e58e89c
--- /dev/null
+++ b/include/trace/power.h
@@ -0,0 +1,34 @@
1#ifndef _TRACE_POWER_H
2#define _TRACE_POWER_H
3
4#include <linux/ktime.h>
5#include <linux/tracepoint.h>
6
7enum {
8 POWER_NONE = 0,
9 POWER_CSTATE = 1,
10 POWER_PSTATE = 2,
11};
12
13struct power_trace {
14#ifdef CONFIG_POWER_TRACER
15 ktime_t stamp;
16 ktime_t end;
17 int type;
18 int state;
19#endif
20};
21
22DECLARE_TRACE(power_start,
23 TPPROTO(struct power_trace *it, unsigned int type, unsigned int state),
24 TPARGS(it, type, state));
25
26DECLARE_TRACE(power_mark,
27 TPPROTO(struct power_trace *it, unsigned int type, unsigned int state),
28 TPARGS(it, type, state));
29
30DECLARE_TRACE(power_end,
31 TPPROTO(struct power_trace *it),
32 TPARGS(it));
33
34#endif /* _TRACE_POWER_H */
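
The power tracepoints are driven from a caller-owned struct power_trace, which only carries timing data when CONFIG_POWER_TRACER is enabled. A minimal, illustrative idle-path caller (the function name is made up for this sketch; trace_power_start()/trace_power_end() are the per-tracepoint call wrappers generated by DECLARE_TRACE):

	#include <trace/power.h>

	/* Hypothetical idle routine instrumenting a C-state transition. */
	static void enter_idle_state(unsigned int cstate)
	{
		struct power_trace it;

		trace_power_start(&it, POWER_CSTATE, cstate);
		/* ... hardware enters and leaves the C-state here ... */
		trace_power_end(&it);
	}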
diff --git a/include/trace/sched.h b/include/trace/sched.h
index 0d81098ee9fc..4e372a1a29bf 100644
--- a/include/trace/sched.h
+++ b/include/trace/sched.h
@@ -4,53 +4,6 @@
4#include <linux/sched.h> 4#include <linux/sched.h>
5#include <linux/tracepoint.h> 5#include <linux/tracepoint.h>
6 6
7DECLARE_TRACE(sched_kthread_stop, 7#include <trace/sched_event_types.h>
8 TPPROTO(struct task_struct *t),
9 TPARGS(t));
10
11DECLARE_TRACE(sched_kthread_stop_ret,
12 TPPROTO(int ret),
13 TPARGS(ret));
14
15DECLARE_TRACE(sched_wait_task,
16 TPPROTO(struct rq *rq, struct task_struct *p),
17 TPARGS(rq, p));
18
19DECLARE_TRACE(sched_wakeup,
20 TPPROTO(struct rq *rq, struct task_struct *p, int success),
21 TPARGS(rq, p, success));
22
23DECLARE_TRACE(sched_wakeup_new,
24 TPPROTO(struct rq *rq, struct task_struct *p, int success),
25 TPARGS(rq, p, success));
26
27DECLARE_TRACE(sched_switch,
28 TPPROTO(struct rq *rq, struct task_struct *prev,
29 struct task_struct *next),
30 TPARGS(rq, prev, next));
31
32DECLARE_TRACE(sched_migrate_task,
33 TPPROTO(struct task_struct *p, int orig_cpu, int dest_cpu),
34 TPARGS(p, orig_cpu, dest_cpu));
35
36DECLARE_TRACE(sched_process_free,
37 TPPROTO(struct task_struct *p),
38 TPARGS(p));
39
40DECLARE_TRACE(sched_process_exit,
41 TPPROTO(struct task_struct *p),
42 TPARGS(p));
43
44DECLARE_TRACE(sched_process_wait,
45 TPPROTO(struct pid *pid),
46 TPARGS(pid));
47
48DECLARE_TRACE(sched_process_fork,
49 TPPROTO(struct task_struct *parent, struct task_struct *child),
50 TPARGS(parent, child));
51
52DECLARE_TRACE(sched_signal_send,
53 TPPROTO(int sig, struct task_struct *p),
54 TPARGS(sig, p));
55 8
56#endif 9#endif
diff --git a/include/trace/sched_event_types.h b/include/trace/sched_event_types.h
new file mode 100644
index 000000000000..a6de5c1601a0
--- /dev/null
+++ b/include/trace/sched_event_types.h
@@ -0,0 +1,151 @@
1
2/* use <trace/sched.h> instead */
3#ifndef TRACE_EVENT_FORMAT
4# error Do not include this file directly.
5# error Unless you know what you are doing.
6#endif
7
8#undef TRACE_SYSTEM
9#define TRACE_SYSTEM sched
10
11TRACE_EVENT_FORMAT(sched_kthread_stop,
12 TPPROTO(struct task_struct *t),
13 TPARGS(t),
14 TPFMT("task %s:%d", t->comm, t->pid),
15 TRACE_STRUCT(
16 TRACE_FIELD(pid_t, pid, t->pid)
17 ),
18 TPRAWFMT("task %d")
19 );
20
21TRACE_EVENT_FORMAT(sched_kthread_stop_ret,
22 TPPROTO(int ret),
23 TPARGS(ret),
24 TPFMT("ret=%d", ret),
25 TRACE_STRUCT(
26 TRACE_FIELD(int, ret, ret)
27 ),
28 TPRAWFMT("ret=%d")
29 );
30
31TRACE_EVENT_FORMAT(sched_wait_task,
32 TPPROTO(struct rq *rq, struct task_struct *p),
33 TPARGS(rq, p),
34 TPFMT("task %s:%d", p->comm, p->pid),
35 TRACE_STRUCT(
36 TRACE_FIELD(pid_t, pid, p->pid)
37 ),
38 TPRAWFMT("task %d")
39 );
40
41TRACE_EVENT_FORMAT(sched_wakeup,
42 TPPROTO(struct rq *rq, struct task_struct *p, int success),
43 TPARGS(rq, p, success),
44 TPFMT("task %s:%d %s",
45 p->comm, p->pid, success ? "succeeded" : "failed"),
46 TRACE_STRUCT(
47 TRACE_FIELD(pid_t, pid, p->pid)
48 TRACE_FIELD(int, success, success)
49 ),
50 TPRAWFMT("task %d success=%d")
51 );
52
53TRACE_EVENT_FORMAT(sched_wakeup_new,
54 TPPROTO(struct rq *rq, struct task_struct *p, int success),
55 TPARGS(rq, p, success),
 56	TPFMT("task %s:%d %s",
57 p->comm, p->pid, success ? "succeeded" : "failed"),
58 TRACE_STRUCT(
59 TRACE_FIELD(pid_t, pid, p->pid)
60 TRACE_FIELD(int, success, success)
61 ),
62 TPRAWFMT("task %d success=%d")
63 );
64
65TRACE_EVENT_FORMAT(sched_switch,
66 TPPROTO(struct rq *rq, struct task_struct *prev,
67 struct task_struct *next),
68 TPARGS(rq, prev, next),
69 TPFMT("task %s:%d ==> %s:%d",
70 prev->comm, prev->pid, next->comm, next->pid),
71 TRACE_STRUCT(
72 TRACE_FIELD(pid_t, prev_pid, prev->pid)
73 TRACE_FIELD(int, prev_prio, prev->prio)
74 TRACE_FIELD_SPECIAL(char next_comm[TASK_COMM_LEN],
75 next_comm,
76 TPCMD(memcpy(TRACE_ENTRY->next_comm,
77 next->comm,
78 TASK_COMM_LEN)))
79 TRACE_FIELD(pid_t, next_pid, next->pid)
80 TRACE_FIELD(int, next_prio, next->prio)
81 ),
82 TPRAWFMT("prev %d:%d ==> next %s:%d:%d")
83 );
84
85TRACE_EVENT_FORMAT(sched_migrate_task,
86 TPPROTO(struct task_struct *p, int orig_cpu, int dest_cpu),
87 TPARGS(p, orig_cpu, dest_cpu),
88 TPFMT("task %s:%d from: %d to: %d",
89 p->comm, p->pid, orig_cpu, dest_cpu),
90 TRACE_STRUCT(
91 TRACE_FIELD(pid_t, pid, p->pid)
92 TRACE_FIELD(int, orig_cpu, orig_cpu)
93 TRACE_FIELD(int, dest_cpu, dest_cpu)
94 ),
95 TPRAWFMT("task %d from: %d to: %d")
96 );
97
98TRACE_EVENT_FORMAT(sched_process_free,
99 TPPROTO(struct task_struct *p),
100 TPARGS(p),
101 TPFMT("task %s:%d", p->comm, p->pid),
102 TRACE_STRUCT(
103 TRACE_FIELD(pid_t, pid, p->pid)
104 ),
105 TPRAWFMT("task %d")
106 );
107
108TRACE_EVENT_FORMAT(sched_process_exit,
109 TPPROTO(struct task_struct *p),
110 TPARGS(p),
111 TPFMT("task %s:%d", p->comm, p->pid),
112 TRACE_STRUCT(
113 TRACE_FIELD(pid_t, pid, p->pid)
114 ),
115 TPRAWFMT("task %d")
116 );
117
118TRACE_EVENT_FORMAT(sched_process_wait,
119 TPPROTO(struct pid *pid),
120 TPARGS(pid),
121 TPFMT("pid %d", pid_nr(pid)),
122 TRACE_STRUCT(
123 TRACE_FIELD(pid_t, pid, pid_nr(pid))
124 ),
125 TPRAWFMT("task %d")
126 );
127
128TRACE_EVENT_FORMAT(sched_process_fork,
129 TPPROTO(struct task_struct *parent, struct task_struct *child),
130 TPARGS(parent, child),
131 TPFMT("parent %s:%d child %s:%d",
132 parent->comm, parent->pid, child->comm, child->pid),
133 TRACE_STRUCT(
134 TRACE_FIELD(pid_t, parent, parent->pid)
135 TRACE_FIELD(pid_t, child, child->pid)
136 ),
137 TPRAWFMT("parent %d child %d")
138 );
139
140TRACE_EVENT_FORMAT(sched_signal_send,
141 TPPROTO(int sig, struct task_struct *p),
142 TPARGS(sig, p),
143 TPFMT("sig: %d task %s:%d", sig, p->comm, p->pid),
144 TRACE_STRUCT(
145 TRACE_FIELD(int, sig, sig)
146 TRACE_FIELD(pid_t, pid, p->pid)
147 ),
148 TPRAWFMT("sig: %d task %d")
149 );
150
151#undef TRACE_SYSTEM
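
Because the same names are still declared as tracepoints via <trace/sched.h>, a probe can attach to any of these events through the generated register_trace_* helpers. A hedged sketch for sched_switch (the probe body and the pr_info message are illustrative only):

	#include <trace/sched.h>

	/* Illustrative probe matching the sched_switch prototype above. */
	static void probe_sched_switch(struct rq *rq, struct task_struct *prev,
				       struct task_struct *next)
	{
		/* e.g. record prev->pid ==> next->pid */
	}

	static int sched_probe_register(void)
	{
		int ret = register_trace_sched_switch(probe_sched_switch);

		if (ret)
			pr_info("sched_switch probe registration failed\n");
		return ret;
	}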
diff --git a/include/trace/trace_event_types.h b/include/trace/trace_event_types.h
new file mode 100644
index 000000000000..df56f5694be6
--- /dev/null
+++ b/include/trace/trace_event_types.h
@@ -0,0 +1,5 @@
1/* trace/<type>_event_types.h here */
2
3#include <trace/sched_event_types.h>
4#include <trace/irq_event_types.h>
5#include <trace/lockdep_event_types.h>
diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h
new file mode 100644
index 000000000000..fd13750ca4ba
--- /dev/null
+++ b/include/trace/trace_events.h
@@ -0,0 +1,5 @@
1/* trace/<type>.h here */
2
3#include <trace/sched.h>
4#include <trace/irq.h>
5#include <trace/lockdep.h>
diff --git a/include/trace/workqueue.h b/include/trace/workqueue.h
new file mode 100644
index 000000000000..867829df4571
--- /dev/null
+++ b/include/trace/workqueue.h
@@ -0,0 +1,25 @@
1#ifndef __TRACE_WORKQUEUE_H
2#define __TRACE_WORKQUEUE_H
3
4#include <linux/tracepoint.h>
5#include <linux/workqueue.h>
6#include <linux/sched.h>
7
8DECLARE_TRACE(workqueue_insertion,
9 TPPROTO(struct task_struct *wq_thread, struct work_struct *work),
10 TPARGS(wq_thread, work));
11
12DECLARE_TRACE(workqueue_execution,
13 TPPROTO(struct task_struct *wq_thread, struct work_struct *work),
14 TPARGS(wq_thread, work));
15
16/* Trace the creation of one workqueue thread on a cpu */
17DECLARE_TRACE(workqueue_creation,
18 TPPROTO(struct task_struct *wq_thread, int cpu),
19 TPARGS(wq_thread, cpu));
20
21DECLARE_TRACE(workqueue_destruction,
22 TPPROTO(struct task_struct *wq_thread),
23 TPARGS(wq_thread));
24
25#endif /* __TRACE_WORKQUEUE_H */
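
Each DECLARE_TRACE above still needs a matching DEFINE_TRACE in exactly one C file plus a trace_*() call at the instrumented site. A hedged sketch of the emitting side (the helper function shown is illustrative, not part of this patch):

	#include <trace/workqueue.h>

	/* Instantiated once, e.g. alongside the workqueue implementation. */
	DEFINE_TRACE(workqueue_creation);

	/* Hypothetical call site when a workqueue thread is created on a CPU. */
	static void report_worker_created(struct task_struct *wq_thread, int cpu)
	{
		trace_workqueue_creation(wq_thread, cpu);
	}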