Diffstat (limited to 'include')

 -rw-r--r--  include/asm-generic/vmlinux.lds.h    11
 -rw-r--r--  include/linux/blktrace_api.h          5
 -rw-r--r--  include/linux/ftrace.h              126
 -rw-r--r--  include/linux/ftrace_irq.h            2
 -rw-r--r--  include/linux/hardirq.h              73
 -rw-r--r--  include/linux/ring_buffer.h          22
 -rw-r--r--  include/linux/sched.h                 2
 -rw-r--r--  include/linux/slab_def.h             68
 -rw-r--r--  include/linux/slob_def.h              9
 -rw-r--r--  include/linux/slub_def.h             72
 -rw-r--r--  include/linux/tracepoint.h            3
 -rw-r--r--  include/trace/kmemtrace.h            75
 -rw-r--r--  include/trace/power.h                34
 -rw-r--r--  include/trace/sched.h                49
 -rw-r--r--  include/trace/sched_event_types.h    72
 -rw-r--r--  include/trace/workqueue.h            25

 16 files changed, 512 insertions(+), 136 deletions(-)
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index c61fab1dd2f8..0add6b28c366 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -61,6 +61,14 @@
 #define BRANCH_PROFILE()
 #endif
 
+#ifdef CONFIG_EVENT_TRACER
+#define FTRACE_EVENTS()	VMLINUX_SYMBOL(__start_ftrace_events) = .;	\
+			*(_ftrace_events)				\
+			VMLINUX_SYMBOL(__stop_ftrace_events) = .;
+#else
+#define FTRACE_EVENTS()
+#endif
+
 /* .data section */
 #define DATA_DATA							\
 	*(.data)							\
@@ -81,7 +89,8 @@
 	*(__tracepoints)						\
 	VMLINUX_SYMBOL(__stop___tracepoints) = .;			\
 	LIKELY_PROFILE()						\
-	BRANCH_PROFILE()
+	BRANCH_PROFILE()						\
+	FTRACE_EVENTS()
 
 #define RO_DATA(align)							\
 	. = ALIGN((align));						\
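The FTRACE_EVENTS() fragment brackets the _ftrace_events input section with start/stop symbols so the event tracer can walk every compiled-in event at boot. A minimal sketch of how such section bounds are typically consumed; struct event_record and init_all_events() are hypothetical stand-ins, not the kernel's actual event structures:

	struct event_record {
		const char	*name;
		int		(*init)(void);
	};

	/* provided by the FTRACE_EVENTS() linker-script fragment above */
	extern struct event_record __start_ftrace_events[];
	extern struct event_record __stop_ftrace_events[];

	static void init_all_events(void)
	{
		struct event_record *ev;

		for (ev = __start_ftrace_events; ev < __stop_ftrace_events; ev++)
			ev->init();
	}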
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 6e915878e88c..d960889e92ef 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -144,6 +144,9 @@ struct blk_user_trace_setup {
 
 #ifdef __KERNEL__
 #if defined(CONFIG_BLK_DEV_IO_TRACE)
+
+#include <linux/sysfs.h>
+
 struct blk_trace {
 	int trace_state;
 	struct rchan *rchan;
@@ -194,6 +197,8 @@ extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 extern int blk_trace_startstop(struct request_queue *q, int start);
 extern int blk_trace_remove(struct request_queue *q);
 
+extern struct attribute_group blk_trace_attr_group;
+
 #else /* !CONFIG_BLK_DEV_IO_TRACE */
 #define blk_trace_ioctl(bdev, cmd, arg)		(-ENOTTY)
 #define blk_trace_shutdown(q)			do { } while (0)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 677432b9cb7e..847bb3c48dd0 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -95,10 +95,44 @@ stack_trace_sysctl(struct ctl_table *table, int write,
 		   loff_t *ppos);
 #endif
 
+struct ftrace_func_command {
+	struct list_head	list;
+	char			*name;
+	int			(*func)(char *func, char *cmd,
+					char *params, int enable);
+};
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 /* asm/ftrace.h must be defined for archs supporting dynamic ftrace */
 #include <asm/ftrace.h>
 
+int ftrace_arch_code_modify_prepare(void);
+int ftrace_arch_code_modify_post_process(void);
+
+struct seq_file;
+
+struct ftrace_probe_ops {
+	void			(*func)(unsigned long ip,
+					unsigned long parent_ip,
+					void **data);
+	int			(*callback)(unsigned long ip, void **data);
+	void			(*free)(void **data);
+	int			(*print)(struct seq_file *m,
+					 unsigned long ip,
+					 struct ftrace_probe_ops *ops,
+					 void *data);
+};
+
+extern int
+register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+			       void *data);
+extern void
+unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+				 void *data);
+extern void
+unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
+extern void unregister_ftrace_function_probe_all(char *glob);
+
 enum {
 	FTRACE_FL_FREE		= (1 << 0),
 	FTRACE_FL_FAILED	= (1 << 1),
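The new ftrace_probe_ops hook lets a tracer attach a callback to every function matching a glob in the function filter. A hedged sketch of a registration; my_probe_func and the "vfs_*" pattern are illustrative only:

	static void my_probe_func(unsigned long ip, unsigned long parent_ip,
				  void **data)
	{
		/* invoked from the function tracer for every matched call site */
	}

	static struct ftrace_probe_ops my_probe_ops = {
		.func	= my_probe_func,
	};

	static int my_probe_init(void)
	{
		/* attach the probe to all functions whose name matches "vfs_*" */
		return register_ftrace_function_probe("vfs_*", &my_probe_ops, NULL);
	}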
@@ -119,6 +153,9 @@ struct dyn_ftrace {
 int ftrace_force_update(void);
 void ftrace_set_filter(unsigned char *buf, int len, int reset);
 
+int register_ftrace_command(struct ftrace_func_command *cmd);
+int unregister_ftrace_command(struct ftrace_func_command *cmd);
+
 /* defined in arch */
 extern int ftrace_ip_converted(unsigned long ip);
 extern int ftrace_dyn_arch_init(void *data);
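register_ftrace_command() plugs a named command into the set_ftrace_filter parser, so that "echo '<func>:<cmd>:<params>' > set_ftrace_filter" can dispatch to it. A sketch of a registration, assuming the callback returns 0 on success; the my_cmd names are hypothetical:

	static int my_cmd_func(char *func, char *cmd, char *params, int enable)
	{
		/* called for "echo '<func>:mycmd:<params>' > set_ftrace_filter" */
		return 0;
	}

	static struct ftrace_func_command my_cmd = {
		.name	= "mycmd",
		.func	= my_cmd_func,
	};

	static int my_cmd_init(void)
	{
		return register_ftrace_command(&my_cmd);
	}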
@@ -126,6 +163,10 @@ extern int ftrace_update_ftrace_func(ftrace_func_t func);
 extern void ftrace_caller(void);
 extern void ftrace_call(void);
 extern void mcount_call(void);
+
+#ifndef FTRACE_ADDR
+#define FTRACE_ADDR ((unsigned long)ftrace_caller)
+#endif
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 extern void ftrace_graph_caller(void);
 extern int ftrace_enable_ftrace_graph_caller(void);
@@ -136,7 +177,7 @@ static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
 #endif
 
 /**
- * ftrace_make_nop - convert code into top
+ * ftrace_make_nop - convert code into nop
  * @mod: module structure if called by module load initialization
  * @rec: the mcount call site record
  * @addr: the address that the call site should be calling
@@ -198,6 +239,14 @@ extern void ftrace_enable_daemon(void);
 # define ftrace_disable_daemon()		do { } while (0)
 # define ftrace_enable_daemon()			do { } while (0)
 static inline void ftrace_release(void *start, unsigned long size) { }
+static inline int register_ftrace_command(struct ftrace_func_command *cmd)
+{
+	return -EINVAL;
+}
+static inline int unregister_ftrace_command(struct ftrace_func_command *cmd)
+{
+	return -EINVAL;
+}
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 /* totally disable ftrace - can not re-enable after this */
@@ -298,6 +347,9 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
 extern int
 __ftrace_printk(unsigned long ip, const char *fmt, ...)
 	__attribute__ ((format (printf, 2, 3)));
+# define ftrace_vprintk(fmt, ap) __ftrace_vprintk(_THIS_IP_, fmt, ap)
+extern int
+__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
 extern void ftrace_dump(void);
 #else
 static inline void
@@ -313,6 +365,11 @@ ftrace_printk(const char *fmt, ...)
 {
 	return 0;
 }
+static inline int
+ftrace_vprintk(const char *fmt, va_list ap)
+{
+	return 0;
+}
 static inline void ftrace_dump(void) { }
 #endif
 
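ftrace_vprintk() gives va_list-based callers the same ring-buffer output that ftrace_printk() provides for varargs callers. A sketch of a varargs wrapper built on top of it; my_trace() is a hypothetical helper:

	static int my_trace(const char *fmt, ...)
	{
		va_list ap;
		int ret;

		va_start(ap, fmt);
		ret = ftrace_vprintk(fmt, ap);	/* records into the trace buffer */
		va_end(ap);

		return ret;
	}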
@@ -327,36 +384,6 @@ ftrace_init_module(struct module *mod,
 			       unsigned long *start, unsigned long *end) { }
 #endif
 
-enum {
-	POWER_NONE = 0,
-	POWER_CSTATE = 1,
-	POWER_PSTATE = 2,
-};
-
-struct power_trace {
-#ifdef CONFIG_POWER_TRACER
-	ktime_t			stamp;
-	ktime_t			end;
-	int			type;
-	int			state;
-#endif
-};
-
-#ifdef CONFIG_POWER_TRACER
-extern void trace_power_start(struct power_trace *it, unsigned int type,
-			      unsigned int state);
-extern void trace_power_mark(struct power_trace *it, unsigned int type,
-			     unsigned int state);
-extern void trace_power_end(struct power_trace *it);
-#else
-static inline void trace_power_start(struct power_trace *it, unsigned int type,
-				     unsigned int state) { }
-static inline void trace_power_mark(struct power_trace *it, unsigned int type,
-				    unsigned int state) { }
-static inline void trace_power_end(struct power_trace *it) { }
-#endif
-
-
 /*
  * Structure that defines an entry function trace.
  */
@@ -380,6 +407,30 @@ struct ftrace_graph_ret {
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
 /*
+ * Stack of return addresses for functions
+ * of a thread.
+ * Used in struct thread_info
+ */
+struct ftrace_ret_stack {
+	unsigned long ret;
+	unsigned long func;
+	unsigned long long calltime;
+};
+
+/*
+ * Primary handler of a function return.
+ * It relies on ftrace_return_to_handler.
+ * Defined in entry_32/64.S
+ */
+extern void return_to_handler(void);
+
+extern int
+ftrace_push_return_trace(unsigned long ret, unsigned long long time,
+			 unsigned long func, int *depth);
+extern void
+ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret);
+
+/*
  * Sometimes we don't want to trace a function with the function
  * graph tracer but we want them to keep traced by the usual function
  * tracer if the function graph tracer is not configured.
@@ -492,4 +543,17 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk)
 
 #endif /* CONFIG_TRACING */
 
+
+#ifdef CONFIG_HW_BRANCH_TRACER
+
+void trace_hw_branch(u64 from, u64 to);
+void trace_hw_branch_oops(void);
+
+#else /* CONFIG_HW_BRANCH_TRACER */
+
+static inline void trace_hw_branch(u64 from, u64 to) {}
+static inline void trace_hw_branch_oops(void) {}
+
+#endif /* CONFIG_HW_BRANCH_TRACER */
+
 #endif /* _LINUX_FTRACE_H */
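ftrace_push_return_trace() and return_to_handler are the pieces an architecture's mcount entry hook combines to divert function returns through the graph tracer. A hedged sketch of that arch-side pattern; the real versions live in arch code, and the timestamp source used here is illustrative:

	void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
	{
		unsigned long old = *parent;
		int depth;

		/* remember the real return address and a timestamp */
		if (ftrace_push_return_trace(old, cpu_clock(raw_smp_processor_id()),
					     self_addr, &depth) == -EBUSY)
			return;

		/* make the function "return" into the tracer's trampoline */
		*parent = (unsigned long)&return_to_handler;
	}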
diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h
index 366a054d0b05..dca7bf8cffe2 100644
--- a/include/linux/ftrace_irq.h
+++ b/include/linux/ftrace_irq.h
@@ -2,7 +2,7 @@
 #define _LINUX_FTRACE_IRQ_H
 
 
-#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_FUNCTION_GRAPH_TRACER)
+#ifdef CONFIG_FTRACE_NMI_ENTER
 extern void ftrace_nmi_enter(void);
 extern void ftrace_nmi_exit(void);
 #else
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index f83288347dda..faa1cf848bcd 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -15,55 +15,61 @@
  * - bits 0-7 are the preemption count (max preemption depth: 256)
  * - bits 8-15 are the softirq count (max # of softirqs: 256)
  *
- * The hardirq count can be overridden per architecture, the default is:
+ * The hardirq count can in theory reach the same as NR_IRQS.
+ * In reality, the number of nested IRQS is limited to the stack
+ * size as well. For archs with over 1000 IRQS it is not practical
+ * to expect that they will all nest. We give a max of 10 bits for
+ * hardirq nesting. An arch may choose to give less than 10 bits.
+ * m68k expects it to be 8.
  *
- * - bits 16-27 are the hardirq count (max # of hardirqs: 4096)
- * - ( bit 28 is the PREEMPT_ACTIVE flag. )
+ * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024)
+ * - bit 26 is the NMI_MASK
+ * - bit 28 is the PREEMPT_ACTIVE flag
  *
  * PREEMPT_MASK: 0x000000ff
  * SOFTIRQ_MASK: 0x0000ff00
- * HARDIRQ_MASK: 0x0fff0000
+ * HARDIRQ_MASK: 0x03ff0000
+ *     NMI_MASK: 0x04000000
  */
 #define PREEMPT_BITS	8
 #define SOFTIRQ_BITS	8
+#define NMI_BITS	1
 
-#ifndef HARDIRQ_BITS
-#define HARDIRQ_BITS	12
+#define MAX_HARDIRQ_BITS 10
 
-#ifndef MAX_HARDIRQS_PER_CPU
-#define MAX_HARDIRQS_PER_CPU NR_IRQS
+#ifndef HARDIRQ_BITS
+# define HARDIRQ_BITS	MAX_HARDIRQ_BITS
 #endif
 
-/*
- * The hardirq mask has to be large enough to have space for potentially
- * all IRQ sources in the system nesting on a single CPU.
- */
-#if (1 << HARDIRQ_BITS) < MAX_HARDIRQS_PER_CPU
-# error HARDIRQ_BITS is too low!
-#endif
+#if HARDIRQ_BITS > MAX_HARDIRQ_BITS
+#error HARDIRQ_BITS too high!
 #endif
 
 #define PREEMPT_SHIFT	0
 #define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
 #define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
+#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)
 
 #define __IRQ_MASK(x)	((1UL << (x))-1)
 
 #define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
 #define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
 #define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
+#define NMI_MASK	(__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
 
 #define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
 #define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
 #define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
+#define NMI_OFFSET	(1UL << NMI_SHIFT)
 
-#if PREEMPT_ACTIVE < (1 << (HARDIRQ_SHIFT + HARDIRQ_BITS))
+#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS))
 #error PREEMPT_ACTIVE is too low!
 #endif
 
 #define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
 #define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
-#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
+#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
+				 | NMI_MASK))
 
 /*
  * Are we doing bottom half or hardware interrupt processing?
@@ -73,6 +79,11 @@
 #define in_softirq()		(softirq_count())
 #define in_interrupt()		(irq_count())
 
+/*
+ * Are we in NMI context?
+ */
+#define in_nmi()	(preempt_count() & NMI_MASK)
+
 #if defined(CONFIG_PREEMPT)
 # define PREEMPT_INATOMIC_BASE kernel_locked()
 # define PREEMPT_CHECK_OFFSET 1
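The resulting preempt_count() layout can be sanity-checked with plain arithmetic. The following stands alone as a user-space program and assumes the default HARDIRQ_BITS of 10:

	#include <assert.h>

	#define __IRQ_MASK(x)	((1UL << (x)) - 1)

	int main(void)
	{
		unsigned long preempt_mask = __IRQ_MASK(8)  << 0;   /* bits  0-7  */
		unsigned long softirq_mask = __IRQ_MASK(8)  << 8;   /* bits  8-15 */
		unsigned long hardirq_mask = __IRQ_MASK(10) << 16;  /* bits 16-25 */
		unsigned long nmi_mask     = __IRQ_MASK(1)  << 26;  /* bit  26    */

		assert(preempt_mask == 0x000000ffUL);
		assert(softirq_mask == 0x0000ff00UL);
		assert(hardirq_mask == 0x03ff0000UL);
		assert(nmi_mask     == 0x04000000UL);
		return 0;
	}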
@@ -164,20 +175,24 @@ extern void irq_enter(void);
  */
 extern void irq_exit(void);
 
 #define nmi_enter()						\
 	do {							\
 		ftrace_nmi_enter();				\
-		lockdep_off();					\
-		rcu_nmi_enter();				\
-		__irq_enter();					\
+		BUG_ON(in_nmi());				\
+		add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET);	\
+		lockdep_off();					\
+		rcu_nmi_enter();				\
+		trace_hardirq_enter();				\
 	} while (0)
 
 #define nmi_exit()						\
 	do {							\
-		__irq_exit();					\
+		trace_hardirq_exit();				\
 		rcu_nmi_exit();					\
 		lockdep_on();					\
-		ftrace_nmi_exit();				\
+		BUG_ON(!in_nmi());				\
+		sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET);	\
+		ftrace_nmi_exit();				\
 	} while (0)
 
 #endif /* LINUX_HARDIRQ_H */
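Because nmi_enter() now raises NMI_OFFSET in preempt_count(), code on the trace path can detect that it is running on top of an interrupted lock holder and refuse lock-taking work. A hypothetical sketch:

	static int my_reserve(void)
	{
		/*
		 * An NMI may have interrupted the current holder of our
		 * spinlock; taking it here would deadlock, so back off.
		 */
		if (in_nmi())
			return -EBUSY;

		/* ... safe to take the lock and reserve space ... */
		return 0;
	}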
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index b3b359660082..f5e793d69bd3 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -8,7 +8,7 @@ struct ring_buffer;
 struct ring_buffer_iter;
 
 /*
- * Don't reference this struct directly, use functions below.
+ * Don't refer to this struct directly, use functions below.
  */
 struct ring_buffer_event {
 	u32		type:2, len:3, time_delta:27;
@@ -74,13 +74,10 @@ void ring_buffer_free(struct ring_buffer *buffer);
 
 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size);
 
-struct ring_buffer_event *
-ring_buffer_lock_reserve(struct ring_buffer *buffer,
-			 unsigned long length,
-			 unsigned long *flags);
+struct ring_buffer_event *ring_buffer_lock_reserve(struct ring_buffer *buffer,
+						   unsigned long length);
 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
-			      struct ring_buffer_event *event,
-			      unsigned long flags);
+			      struct ring_buffer_event *event);
 int ring_buffer_write(struct ring_buffer *buffer,
 		      unsigned long length, void *data);
 
@@ -124,9 +121,20 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
 u64 ring_buffer_time_stamp(int cpu);
 void ring_buffer_normalize_time_stamp(int cpu, u64 *ts);
 
+/*
+ * The below functions are fine to use outside the tracing facility.
+ */
+#ifdef CONFIG_RING_BUFFER
 void tracing_on(void);
 void tracing_off(void);
 void tracing_off_permanent(void);
+int tracing_is_on(void);
+#else
+static inline void tracing_on(void) { }
+static inline void tracing_off(void) { }
+static inline void tracing_off_permanent(void) { }
+static inline int tracing_is_on(void) { return 0; }
+#endif
 
 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer);
 void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data);
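With the irq-flags parameters gone, the reserve/commit pairing becomes a plain two-step call. A sketch of the updated pattern; struct my_event and my_write() are illustrative, while ring_buffer_event_data() is the existing accessor for an event's payload:

	struct my_event {
		unsigned long	ip;
	};

	static int my_write(struct ring_buffer *buffer, unsigned long ip)
	{
		struct ring_buffer_event *event;
		struct my_event *entry;

		event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
		if (!event)
			return -EBUSY;

		entry = ring_buffer_event_data(event);
		entry->ip = ip;

		return ring_buffer_unlock_commit(buffer, event);
	}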
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8981e52c714f..426666dd8203 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -137,6 +137,8 @@ extern unsigned long nr_uninterruptible(void);
 extern unsigned long nr_active(void);
 extern unsigned long nr_iowait(void);
 
+extern unsigned long get_parent_ip(unsigned long addr);
+
 struct seq_file;
 struct cfs_rq;
 struct task_group;
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 6ca6a7b66d75..f4523651fa42 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -14,6 +14,7 @@
 #include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
 #include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
 #include <linux/compiler.h>
+#include <trace/kmemtrace.h>
 
 /* Size description struct for general caches. */
 struct cache_sizes {
@@ -28,8 +29,26 @@ extern struct cache_sizes malloc_sizes[];
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
-static inline void *kmalloc(size_t size, gfp_t flags)
+#ifdef CONFIG_KMEMTRACE
+extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
+extern size_t slab_buffer_size(struct kmem_cache *cachep);
+#else
+static __always_inline void *
+kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
 {
+	return kmem_cache_alloc(cachep, flags);
+}
+static inline size_t slab_buffer_size(struct kmem_cache *cachep)
+{
+	return 0;
+}
+#endif
+
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
+{
+	struct kmem_cache *cachep;
+	void *ret;
+
 	if (__builtin_constant_p(size)) {
 		int i = 0;
 
@@ -47,10 +66,17 @@ static inline void *kmalloc(size_t size, gfp_t flags)
 found:
 #ifdef CONFIG_ZONE_DMA
 		if (flags & GFP_DMA)
-			return kmem_cache_alloc(malloc_sizes[i].cs_dmacachep,
-						flags);
+			cachep = malloc_sizes[i].cs_dmacachep;
+		else
 #endif
-		return kmem_cache_alloc(malloc_sizes[i].cs_cachep, flags);
+			cachep = malloc_sizes[i].cs_cachep;
+
+		ret = kmem_cache_alloc_notrace(cachep, flags);
+
+		kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
+				     size, slab_buffer_size(cachep), flags);
+
+		return ret;
 	}
 	return __kmalloc(size, flags);
 }
@@ -59,8 +85,25 @@ found:
 extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+#ifdef CONFIG_KMEMTRACE
+extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
+					   gfp_t flags,
+					   int nodeid);
+#else
+static __always_inline void *
+kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
+			      gfp_t flags,
+			      int nodeid)
+{
+	return kmem_cache_alloc_node(cachep, flags, nodeid);
+}
+#endif
+
+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
+	struct kmem_cache *cachep;
+	void *ret;
+
 	if (__builtin_constant_p(size)) {
 		int i = 0;
 
@@ -78,11 +121,18 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 found:
 #ifdef CONFIG_ZONE_DMA
 		if (flags & GFP_DMA)
-			return kmem_cache_alloc_node(malloc_sizes[i].cs_dmacachep,
-						     flags, node);
+			cachep = malloc_sizes[i].cs_dmacachep;
+		else
 #endif
-		return kmem_cache_alloc_node(malloc_sizes[i].cs_cachep,
-					     flags, node);
+			cachep = malloc_sizes[i].cs_cachep;
+
+		ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
+
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_,
+					  ret, size, slab_buffer_size(cachep),
+					  flags, node);
+
+		return ret;
 	}
 	return __kmalloc_node(size, flags, node);
 }
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
index 59a3fa476ab9..0ec00b39d006 100644
--- a/include/linux/slob_def.h
+++ b/include/linux/slob_def.h
@@ -3,14 +3,15 @@
 
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
-static inline void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
+static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
+					      gfp_t flags)
 {
 	return kmem_cache_alloc_node(cachep, flags, -1);
 }
 
 void *__kmalloc_node(size_t size, gfp_t flags, int node);
 
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	return __kmalloc_node(size, flags, node);
 }
@@ -23,12 +24,12 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
  * kmalloc is the normal method of allocating memory
  * in the kernel.
  */
-static inline void *kmalloc(size_t size, gfp_t flags)
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
 	return __kmalloc_node(size, flags, -1);
 }
 
-static inline void *__kmalloc(size_t size, gfp_t flags)
+static __always_inline void *__kmalloc(size_t size, gfp_t flags)
 {
 	return kmalloc(size, flags);
 }
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 2f5c16b1aacd..9e3a575b2c30 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -10,6 +10,7 @@
 #include <linux/gfp.h>
 #include <linux/workqueue.h>
 #include <linux/kobject.h>
+#include <trace/kmemtrace.h>
 
 enum stat_item {
 	ALLOC_FASTPATH,		/* Allocation from cpu slab */
@@ -121,10 +122,23 @@ struct kmem_cache {
 #define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
 
 /*
+ * Maximum kmalloc object size handled by SLUB. Larger object allocations
+ * are passed through to the page allocator. The page allocator "fastpath"
+ * is relatively slow so we need this value sufficiently high so that
+ * performance critical objects are allocated through the SLUB fastpath.
+ *
+ * This should be dropped to PAGE_SIZE / 2 once the page allocator
+ * "fastpath" becomes competitive with the slab allocator fastpaths.
+ */
+#define SLUB_MAX_SIZE (PAGE_SIZE)
+
+#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 1)
+
+/*
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1];
+extern struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT];
 
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
@@ -204,15 +218,33 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
+#ifdef CONFIG_KMEMTRACE
+extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
+#else
+static __always_inline void *
+kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+{
+	return kmem_cache_alloc(s, gfpflags);
+}
+#endif
+
 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
 {
-	return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size));
+	unsigned int order = get_order(size);
+	void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
+			     size, PAGE_SIZE << order, flags);
+
+	return ret;
 }
 
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
+	void *ret;
+
 	if (__builtin_constant_p(size)) {
-		if (size > PAGE_SIZE)
+		if (size > SLUB_MAX_SIZE)
 			return kmalloc_large(size, flags);
 
 		if (!(flags & SLUB_DMA)) {
@@ -221,7 +253,13 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 			if (!s)
 				return ZERO_SIZE_PTR;
 
-			return kmem_cache_alloc(s, flags);
+			ret = kmem_cache_alloc_notrace(s, flags);
+
+			kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
+					     _THIS_IP_, ret,
+					     size, s->size, flags);
+
+			return ret;
 		}
 	}
 	return __kmalloc(size, flags);
@@ -231,16 +269,38 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 void *__kmalloc_node(size_t size, gfp_t flags, int node);
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
+#ifdef CONFIG_KMEMTRACE
+extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+					   gfp_t gfpflags,
+					   int node);
+#else
+static __always_inline void *
+kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+			      gfp_t gfpflags,
+			      int node)
+{
+	return kmem_cache_alloc_node(s, gfpflags, node);
+}
+#endif
+
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
+	void *ret;
+
 	if (__builtin_constant_p(size) &&
-		size <= PAGE_SIZE && !(flags & SLUB_DMA)) {
+		size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
 		struct kmem_cache *s = kmalloc_slab(size);
 
 		if (!s)
 			return ZERO_SIZE_PTR;
 
-		return kmem_cache_alloc_node(s, flags, node);
+		ret = kmem_cache_alloc_node_notrace(s, flags, node);
+
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+					  _THIS_IP_, ret,
+					  size, s->size, flags, node);
+
+		return ret;
 	}
 	return __kmalloc_node(size, flags, node);
 }
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 757005458366..34ae464effff 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -153,4 +153,7 @@ static inline void tracepoint_synchronize_unregister(void)
 	synchronize_sched();
 }
 
+#define DEFINE_TRACE_FMT(name, proto, args, fmt)	\
+	DECLARE_TRACE(name, TPPROTO(proto), TPARGS(args))
+
 #endif
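With this default definition, DEFINE_TRACE_FMT() degrades to a plain DECLARE_TRACE() and the format string is dropped, so existing tracepoint users see no change. A consumer that wants the formats redefines the macro before re-including the event-types header; a hedged sketch of that pattern:

	/* collect just the event names into strings (illustrative only) */
	#undef DEFINE_TRACE_FMT
	#define DEFINE_TRACE_FMT(call, proto, args, fmt)	\
		static const char *ftrace_event_name_##call = #call;

	#include <trace/sched_event_types.h>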
diff --git a/include/trace/kmemtrace.h b/include/trace/kmemtrace.h
new file mode 100644
index 000000000000..ad8b7857855a
--- /dev/null
+++ b/include/trace/kmemtrace.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2008 Eduard - Gabriel Munteanu
+ *
+ * This file is released under GPL version 2.
+ */
+
+#ifndef _LINUX_KMEMTRACE_H
+#define _LINUX_KMEMTRACE_H
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <linux/marker.h>
+
+enum kmemtrace_type_id {
+	KMEMTRACE_TYPE_KMALLOC = 0,	/* kmalloc() or kfree(). */
+	KMEMTRACE_TYPE_CACHE,		/* kmem_cache_*(). */
+	KMEMTRACE_TYPE_PAGES,		/* __get_free_pages() and friends. */
+};
+
+#ifdef CONFIG_KMEMTRACE
+
+extern void kmemtrace_init(void);
+
+extern void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
+				      unsigned long call_site,
+				      const void *ptr,
+				      size_t bytes_req,
+				      size_t bytes_alloc,
+				      gfp_t gfp_flags,
+				      int node);
+
+extern void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
+				unsigned long call_site,
+				const void *ptr);
+
+#else /* CONFIG_KMEMTRACE */
+
+static inline void kmemtrace_init(void)
+{
+}
+
+static inline void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
+					     unsigned long call_site,
+					     const void *ptr,
+					     size_t bytes_req,
+					     size_t bytes_alloc,
+					     gfp_t gfp_flags,
+					     int node)
+{
+}
+
+static inline void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
+				       unsigned long call_site,
+				       const void *ptr)
+{
+}
+
+#endif /* CONFIG_KMEMTRACE */
+
+static inline void kmemtrace_mark_alloc(enum kmemtrace_type_id type_id,
+					unsigned long call_site,
+					const void *ptr,
+					size_t bytes_req,
+					size_t bytes_alloc,
+					gfp_t gfp_flags)
+{
+	kmemtrace_mark_alloc_node(type_id, call_site, ptr,
+				  bytes_req, bytes_alloc, gfp_flags, -1);
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_KMEMTRACE_H */
+
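The two hooks are meant to be paired around an allocator's entry points, with bytes_req and bytes_alloc distinguishing the size the caller asked for from the size the allocator really handed out. A sketch of the pairing; my_alloc() and my_free() are hypothetical wrappers:

	static void *my_alloc(size_t size, gfp_t flags)
	{
		void *ret = kmalloc(size, flags);

		/* requested == allocated here; slab callers pass the real size */
		kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
				     size, size, flags);
		return ret;
	}

	static void my_free(void *ptr)
	{
		kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ptr);
		kfree(ptr);
	}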
diff --git a/include/trace/power.h b/include/trace/power.h
new file mode 100644
index 000000000000..2c733e58e89c
--- /dev/null
+++ b/include/trace/power.h
@@ -0,0 +1,34 @@
+#ifndef _TRACE_POWER_H
+#define _TRACE_POWER_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+
+enum {
+	POWER_NONE = 0,
+	POWER_CSTATE = 1,
+	POWER_PSTATE = 2,
+};
+
+struct power_trace {
+#ifdef CONFIG_POWER_TRACER
+	ktime_t			stamp;
+	ktime_t			end;
+	int			type;
+	int			state;
+#endif
+};
+
+DECLARE_TRACE(power_start,
+	TPPROTO(struct power_trace *it, unsigned int type, unsigned int state),
+	TPARGS(it, type, state));
+
+DECLARE_TRACE(power_mark,
+	TPPROTO(struct power_trace *it, unsigned int type, unsigned int state),
+	TPARGS(it, type, state));
+
+DECLARE_TRACE(power_end,
+	TPPROTO(struct power_trace *it),
+	TPARGS(it));
+
+#endif /* _TRACE_POWER_H */
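DECLARE_TRACE() generates register/unregister helpers for each tracepoint, so the power tracer can hook these points at runtime instead of relying on the old inline stubs removed from ftrace.h. A sketch of a probe registration; probe_power_start() is a hypothetical callback:

	static void probe_power_start(struct power_trace *it, unsigned int type,
				      unsigned int state)
	{
		/* record the start of a C-state/P-state transition */
	}

	static int my_power_trace_init(void)
	{
		return register_trace_power_start(probe_power_start);
	}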
diff --git a/include/trace/sched.h b/include/trace/sched.h
index 0d81098ee9fc..4e372a1a29bf 100644
--- a/include/trace/sched.h
+++ b/include/trace/sched.h
@@ -4,53 +4,6 @@
 #include <linux/sched.h>
 #include <linux/tracepoint.h>
 
-DECLARE_TRACE(sched_kthread_stop,
-	TPPROTO(struct task_struct *t),
-	TPARGS(t));
-
-DECLARE_TRACE(sched_kthread_stop_ret,
-	TPPROTO(int ret),
-	TPARGS(ret));
-
-DECLARE_TRACE(sched_wait_task,
-	TPPROTO(struct rq *rq, struct task_struct *p),
-	TPARGS(rq, p));
-
-DECLARE_TRACE(sched_wakeup,
-	TPPROTO(struct rq *rq, struct task_struct *p, int success),
-	TPARGS(rq, p, success));
-
-DECLARE_TRACE(sched_wakeup_new,
-	TPPROTO(struct rq *rq, struct task_struct *p, int success),
-	TPARGS(rq, p, success));
-
-DECLARE_TRACE(sched_switch,
-	TPPROTO(struct rq *rq, struct task_struct *prev,
-		struct task_struct *next),
-	TPARGS(rq, prev, next));
-
-DECLARE_TRACE(sched_migrate_task,
-	TPPROTO(struct task_struct *p, int orig_cpu, int dest_cpu),
-	TPARGS(p, orig_cpu, dest_cpu));
-
-DECLARE_TRACE(sched_process_free,
-	TPPROTO(struct task_struct *p),
-	TPARGS(p));
-
-DECLARE_TRACE(sched_process_exit,
-	TPPROTO(struct task_struct *p),
-	TPARGS(p));
-
-DECLARE_TRACE(sched_process_wait,
-	TPPROTO(struct pid *pid),
-	TPARGS(pid));
-
-DECLARE_TRACE(sched_process_fork,
-	TPPROTO(struct task_struct *parent, struct task_struct *child),
-	TPARGS(parent, child));
-
-DECLARE_TRACE(sched_signal_send,
-	TPPROTO(int sig, struct task_struct *p),
-	TPARGS(sig, p));
+#include <trace/sched_event_types.h>
 
 #endif
diff --git a/include/trace/sched_event_types.h b/include/trace/sched_event_types.h
new file mode 100644
index 000000000000..a4f662940f4e
--- /dev/null
+++ b/include/trace/sched_event_types.h
@@ -0,0 +1,72 @@
+
+/* use <trace/sched.h> instead */
+#ifndef DEFINE_TRACE_FMT
+# error Do not include this file directly.
+# error Unless you know what you are doing.
+#endif
+
+DEFINE_TRACE_FMT(sched_kthread_stop,
+	TPPROTO(struct task_struct *t),
+	TPARGS(t),
+	TPFMT("task %s:%d", t->comm, t->pid));
+
+DEFINE_TRACE_FMT(sched_kthread_stop_ret,
+	TPPROTO(int ret),
+	TPARGS(ret),
+	TPFMT("ret=%d", ret));
+
+DEFINE_TRACE_FMT(sched_wait_task,
+	TPPROTO(struct rq *rq, struct task_struct *p),
+	TPARGS(rq, p),
+	TPFMT("task %s:%d", p->comm, p->pid));
+
+DEFINE_TRACE_FMT(sched_wakeup,
+	TPPROTO(struct rq *rq, struct task_struct *p, int success),
+	TPARGS(rq, p, success),
+	TPFMT("task %s:%d %s",
+	      p->comm, p->pid, success?"succeeded":"failed"));
+
+DEFINE_TRACE_FMT(sched_wakeup_new,
+	TPPROTO(struct rq *rq, struct task_struct *p, int success),
+	TPARGS(rq, p, success),
+	TPFMT("task %s:%d %s",
+	      p->comm, p->pid, success?"succeeded":"failed"));
+
+DEFINE_TRACE_FMT(sched_switch,
+	TPPROTO(struct rq *rq, struct task_struct *prev,
+		struct task_struct *next),
+	TPARGS(rq, prev, next),
+	TPFMT("task %s:%d ==> %s:%d",
+	      prev->comm, prev->pid, next->comm, next->pid));
+
+DEFINE_TRACE_FMT(sched_migrate_task,
+	TPPROTO(struct task_struct *p, int orig_cpu, int dest_cpu),
+	TPARGS(p, orig_cpu, dest_cpu),
+	TPFMT("task %s:%d from: %d to: %d",
+	      p->comm, p->pid, orig_cpu, dest_cpu));
+
+DEFINE_TRACE_FMT(sched_process_free,
+	TPPROTO(struct task_struct *p),
+	TPARGS(p),
+	TPFMT("task %s:%d", p->comm, p->pid));
+
+DEFINE_TRACE_FMT(sched_process_exit,
+	TPPROTO(struct task_struct *p),
+	TPARGS(p),
+	TPFMT("task %s:%d", p->comm, p->pid));
+
+DEFINE_TRACE_FMT(sched_process_wait,
+	TPPROTO(struct pid *pid),
+	TPARGS(pid),
+	TPFMT("pid %d", pid_nr(pid)));
+
+DEFINE_TRACE_FMT(sched_process_fork,
+	TPPROTO(struct task_struct *parent, struct task_struct *child),
+	TPARGS(parent, child),
+	TPFMT("parent %s:%d child %s:%d",
+	      parent->comm, parent->pid, child->comm, child->pid));
+
+DEFINE_TRACE_FMT(sched_signal_send,
+	TPPROTO(int sig, struct task_struct *p),
+	TPARGS(sig, p),
+	TPFMT("sig: %d task %s:%d", sig, p->comm, p->pid));
diff --git a/include/trace/workqueue.h b/include/trace/workqueue.h
new file mode 100644
index 000000000000..867829df4571
--- /dev/null
+++ b/include/trace/workqueue.h
@@ -0,0 +1,25 @@
+#ifndef __TRACE_WORKQUEUE_H
+#define __TRACE_WORKQUEUE_H
+
+#include <linux/tracepoint.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+
+DECLARE_TRACE(workqueue_insertion,
+	TPPROTO(struct task_struct *wq_thread, struct work_struct *work),
+	TPARGS(wq_thread, work));
+
+DECLARE_TRACE(workqueue_execution,
+	TPPROTO(struct task_struct *wq_thread, struct work_struct *work),
+	TPARGS(wq_thread, work));
+
+/* Trace the creation of one workqueue thread on a cpu */
+DECLARE_TRACE(workqueue_creation,
+	TPPROTO(struct task_struct *wq_thread, int cpu),
+	TPARGS(wq_thread, cpu));
+
+DECLARE_TRACE(workqueue_destruction,
+	TPPROTO(struct task_struct *wq_thread),
+	TPARGS(wq_thread));
+
+#endif /* __TRACE_WORKQUEUE_H */
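As with the other tracepoints, DECLARE_TRACE() here generates register_trace_workqueue_*() helpers for runtime hooking. A sketch of observing work insertion; the probe name is hypothetical:

	static void probe_workqueue_insertion(struct task_struct *wq_thread,
					      struct work_struct *work)
	{
		/* a work item was queued for wq_thread */
	}

	static int my_wq_trace_init(void)
	{
		return register_trace_workqueue_insertion(probe_workqueue_insertion);
	}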